diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll
index e391e1cd05a1..3b2bbbe96895 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll
@@ -18,58 +18,20 @@ entry:
   ret <vscale x 16 x i16> %1
 }

-define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
+define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vloxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
+; CHECK-NEXT:    vloxseg2ei16.v v4, (a0), v12, v0.t
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16(i16* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv1i8(i16*, <vscale x 1 x i8>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv1i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 1 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vloxseg2_nxv16i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16> %val, <vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
   ret <vscale x 16 x i16> %1
 }

-define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vloxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv1i8(i16* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv1i8(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 1 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
 declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv16i8(i16*, <vscale x 16 x i8>, i32)
 declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)

@@ -86,432 +48,20 @@ entry:
   ret <vscale x 16 x i16> %1
 }

-define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
+define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vloxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
+; CHECK-NEXT:    vloxseg2ei8.v v4, (a0), v12, v0.t
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv16i8(i16* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv2i32(i16*, <vscale x 2 x i32>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv2i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 2 x i32>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vloxseg2_nxv16i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16> %val, <vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
   ret <vscale x 16 x i16> %1
 }

-define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vloxseg2ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv2i32(i16* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv2i32(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 2 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv4i16(i16*, <vscale x 4 x i16>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv4i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 4 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vloxseg2_nxv16i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
-}
-
-define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vloxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv4i16(i16* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv4i16(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 4 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv32i16(i16*, <vscale x 32 x i16>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv32i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 32 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vloxseg2_nxv16i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
-}
-
-define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv4r.v v20, v16
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vloxseg2ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v20
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv32i16(i16* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv32i16(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 32 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv1i32(i16*, <vscale x 1 x i32>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv1i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 1 x i32>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vloxseg2_nxv16i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
-}
-
-define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vloxseg2ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv1i32(i16* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv1i32(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 1 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv8i16(i16*, <vscale x 8 x i16>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv8i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 8 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vloxseg2_nxv16i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
-}
-
-define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vloxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv8i16(i16* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv8i16(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 8 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv8i8(i16*, <vscale x 8 x i8>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv8i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 8 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vloxseg2_nxv16i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
-}
-
-define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vloxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv8i8(i16* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv8i8(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 8 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv8i32(i16*, <vscale x 8 x i32>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv8i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 8 x i32>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vloxseg2_nxv16i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
-}
-
-define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vloxseg2ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv8i32(i16* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv8i32(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 8 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv64i8(i16*, <vscale x 64 x i8>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv64i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 64 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vloxseg2_nxv16i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
-}
-
-define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv4r.v v20, v16
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vloxseg2ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v20
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv64i8(i16* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv64i8(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 64 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv4i8(i16*, <vscale x 4 x i8>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv4i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 4 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vloxseg2_nxv16i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
-}
-
-define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vloxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv4i8(i16* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv4i8(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 4 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv1i16(i16*, <vscale x 1 x i16>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv1i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 1 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vloxseg2_nxv16i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
-}
-
-define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vloxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv1i16(i16* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv1i16(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 1 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv32i8(i16*, <vscale x 32 x i8>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv32i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 32 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vloxseg2_nxv16i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
-}
-
-define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vloxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv32i8(i16* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv32i8(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 32 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv2i8(i16*, <vscale x 2 x i8>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv2i8(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 2 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vloxseg2_nxv16i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
-}
-
-define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vloxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv2i8(i16* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv2i8(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 2 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
 declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv16i32(i16*, <vscale x 16 x i32>, i32)
 declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)

@@ -528,126 +78,20 @@ entry:
   ret <vscale x 16 x i16> %1
 }

-define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
+define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vloxseg2ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v20
+; CHECK-NEXT:    vloxseg2ei32.v v4, (a0), v16, v0.t
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv16i32(i16* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv2i16(i16*, <vscale x 2 x i16>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv2i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 2 x i16>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vloxseg2_nxv16i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16> %val, <vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
   ret <vscale x 16 x i16> %1
 }

-define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vloxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv2i16(i16* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv2i16(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 2 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv4i32(i16*, <vscale x 4 x i32>, i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv4i32(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 4 x i32>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i16> @test_vloxseg2_nxv16i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
-  ret <vscale x 16 x i16> %1
-}
-
-define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT:    vloxseg2ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.nxv16i16.nxv4i32(i16* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv4i32(<vscale x 16 x i16> %1, <vscale x 16 x i16> %1, i16* %base, <vscale x 4 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
-  ret <vscale x 16 x i16> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vloxseg2_nxv1i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv16i16(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)

@@ -664,160 +108,20 @@ entry:
   ret <vscale x 1 x i8> %1
 }

-define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vloxseg2_nxv1i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val, <vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
   ret <vscale x 1 x i8> %1
 }

-define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv16i8(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vloxseg2_nxv1i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv2i32(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv4i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vloxseg2_nxv1i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv4i16(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv32i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vloxseg2_nxv1i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv32i16(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)

@@ -834,194 +138,20 @@ entry:
   ret <vscale x 1 x i8> %1
 }

-define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv8i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vloxseg2_nxv1i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val, <vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
   ret <vscale x 1 x i8> %1
 }

-define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv8i16(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv8i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vloxseg2_nxv1i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv8i8(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vloxseg2_nxv1i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv8i32(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vloxseg2_nxv1i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv64i8(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vloxseg2_nxv1i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv4i8(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)

@@ -1038,229 +168,20 @@ entry:
   ret <vscale x 1 x i8> %1
 }

-define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv32i8(i8*, <vscale x 32 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv32i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vloxseg2_nxv1i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val, <vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
   ret <vscale x 1 x i8> %1
 }

-define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv32i8(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vloxseg2_nxv1i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv2i8(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv16i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vloxseg2_nxv1i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv16i32(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv2i16(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vloxseg2_nxv1i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv2i16(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv4i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vloxseg2_nxv1i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.nxv1i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv4i32(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv16i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vloxseg3_nxv1i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv16i16(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)

@@ -1277,165 +198,22 @@ entry:
   ret <vscale x 1 x i8> %1
 }

-define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg3ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg3ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vloxseg3ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv1i8(i8* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv16i8(i8*, <vscale x 16 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vloxseg3_nxv1i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val, <vscale x 1 x i8> %val, <vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
   ret <vscale x 1 x i8> %1
 }

-define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv16i8(i8* %base, <vscale x 16 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv16i8(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv2i32(i8*, <vscale x 2 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv2i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vloxseg3_nxv1i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv2i32(i8* %base, <vscale x 2 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv2i32(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv4i16(i8*, <vscale x 4 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv4i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vloxseg3_nxv1i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv4i16(i8* %base, <vscale x 4 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv4i16(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv32i16(i8*, <vscale x 32 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv32i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 32 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vloxseg3_nxv1i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv32i16(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)

@@ -1452,200 +230,22 @@ entry:
   ret <vscale x 1 x i8> %1
 }

-define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
+define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg3ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg3ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vloxseg3ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv8i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i16>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vloxseg3_nxv1i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val, <vscale x 1 x i8> %val, <vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
   ret <vscale x 1 x i8> %1
 }

-define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv8i16(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv8i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vloxseg3_nxv1i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv8i8(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vloxseg3_nxv1i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define
-define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv8i32(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv64i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 64 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vloxseg3_nxv1i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv64i8(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 64 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv4i8(i8*, <vscale x 4 x i8>, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv4i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 4 x i8>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i8> @test_vloxseg3_nxv1i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
-  ret <vscale x 1 x i8> %1
-}
-
-define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e8,mf8,ta,mu
-; CHECK-NEXT:    vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,tu,mu
-; CHECK-NEXT:    vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv4i8(i8* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
-  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv4i8(<vscale x 1 x i8> %1, <vscale x 1 x i8> %1, <vscale x 1 x i8> %1, i8* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
-  ret <vscale x 1 x i8> %3
-}
-
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.nxv1i8.nxv1i16(i8*, <vscale x 1 x i16>, i32)
 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)

@@ -1662,236 +262,22 @@ entry:
   ret <vscale x 1 x i8> %1
 }

-define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv1i16(i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
@test_vloxseg3_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv32i8(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv32i8(,,, i8*, , , i32) - -define @test_vloxseg3_nxv1i8_nxv32i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv32i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv1i8_nxv32i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv32i8( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv2i8(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv2i8(,,, i8*, , , i32) - -define @test_vloxseg3_nxv1i8_nxv2i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv2i8( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv16i32(i8*, , i32) -declare {,,} 
@llvm.riscv.vloxseg3.mask.nxv1i8.nxv16i32(,,, i8*, , , i32) - -define @test_vloxseg3_nxv1i8_nxv16i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv16i32( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv2i16(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv2i16(,,, i8*, , , i32) - -define @test_vloxseg3_nxv1i8_nxv2i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv2i16( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv4i32(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv4i32(,,, i8*, , , i32) - -define @test_vloxseg3_nxv1i8_nxv4i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} 
@llvm.riscv.vloxseg3.nxv1i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv4i32( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv16i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv16i16(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv1i8_nxv16i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i8_nxv16i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv16i16( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i8(i8*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8(,,,, i8*, , , i32) @@ -1908,170 +294,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv16i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv16i8(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv1i8_nxv16i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv16i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu 
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv16i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv2i32(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv2i32(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv1i8_nxv2i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i8_nxv2i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv2i32( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv4i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv4i16(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv1i8_nxv4i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv4i16( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv32i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv32i16(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv1i8_nxv32i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; 
CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i8_nxv32i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv32i16( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i32(i8*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32(,,,, i8*, , , i32) @@ -2088,206 +327,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv8i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv8i16(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv1i8_nxv8i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv8i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1i8_nxv8i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv8i16( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare 
{,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv8i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv8i8(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv1i8_nxv8i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i8_nxv8i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv8i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv8i32(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv8i32(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv1i8_nxv8i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i8_nxv8i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv8i32( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv64i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv64i8(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv1i8_nxv64i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i8_nxv64i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, 
e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv64i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv4i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv4i8(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv1i8_nxv4i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i8_nxv4i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv4i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i16(i8*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16(,,,, i8*, , , i32) @@ -2304,243 +360,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv32i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv32i8(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv1i8_nxv32i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv32i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1i8_nxv32i8(i8* %base, %index, i32 
%vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv32i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv2i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv2i8(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv1i8_nxv2i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv2i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv16i32(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv16i32(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv1i8_nxv16i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv16i32( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv2i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv2i16(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv1i8_nxv2i16(i8* %base, %index, i32 %vl) 
{ -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv2i16( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv4i32(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv4i32(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv1i8_nxv4i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i8.nxv4i32( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv16i16(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv16i16(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv1i8_nxv16i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i8_nxv16i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call 
{,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv16i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i8(i8*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8(,,,,, i8*, , , i32) @@ -2557,175 +393,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg5_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv16i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv16i8(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv1i8_nxv16i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv16i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv16i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv2i32(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv2i32(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv1i8_nxv2i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i8_nxv2i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg5_mask_nxv1i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv2i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv4i16(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv4i16(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv1i8_nxv4i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv4i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv32i16(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv32i16(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv1i8_nxv32i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i8_nxv32i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv32i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i32(i8*, , i32) declare {,,,,} 
@llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32(,,,,, i8*, , , i32) @@ -2742,212 +427,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg5_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv8i16(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv8i16(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv1i8_nxv8i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv8i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1i8_nxv8i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv8i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv8i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv8i8(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv1i8_nxv8i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i8_nxv8i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, 
(a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv8i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv8i32(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv8i32(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv1i8_nxv8i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i8_nxv8i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv8i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv64i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv64i8(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv1i8_nxv64i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i8_nxv64i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv64i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv4i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv4i8(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv1i8_nxv4i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} 
@llvm.riscv.vloxseg5.nxv1i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i8_nxv4i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv4i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i16(i8*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16(,,,,, i8*, , , i32) @@ -2964,250 +461,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg5_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv32i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv32i8(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv1i8_nxv32i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv32i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1i8_nxv32i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv32i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} 
@llvm.riscv.vloxseg5.nxv1i8.nxv2i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv2i8(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv1i8_nxv2i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv2i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv16i32(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv16i32(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv1i8_nxv16i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv16i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv2i16(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv2i16(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv1i8_nxv2i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v 
v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv2i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv4i32(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv4i32(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv1i8_nxv4i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i8.nxv4i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv16i16(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv16i16(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv1i8_nxv16i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i8_nxv16i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv16i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i8(i8*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8(,,,,,, i8*, , , i32) @@ -3224,180 +495,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1i8_nxv1i8(i8* 
%base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv16i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv16i8(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv1i8_nxv16i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv16i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv16i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv2i32(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv2i32(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv1i8_nxv2i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i8_nxv2i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; 
CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv2i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv4i16(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv4i16(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv1i8_nxv4i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv4i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv32i16(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv32i16(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv1i8_nxv32i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i8_nxv32i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv32i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i32(i8*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32(,,,,,, i8*, , , i32) @@ -3414,218 +530,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: 
test_vloxseg6_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv8i16(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv8i16(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv1i8_nxv8i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv8i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1i8_nxv8i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv8i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv8i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv8i8(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv1i8_nxv8i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i8_nxv8i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv8i8(i8* %base, %index, i32 %vl) - 
%1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv8i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv8i32(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv8i32(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv1i8_nxv8i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i8_nxv8i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv8i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv64i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv64i8(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv1i8_nxv64i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i8_nxv64i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv64i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv4i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv4i8(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv1i8_nxv4i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = 
extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i8_nxv4i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv4i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i16(i8*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16(,,,,,, i8*, , , i32) @@ -3642,257 +565,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv32i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv32i8(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv1i8_nxv32i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv32i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1i8_nxv32i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv32i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 
= extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv2i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv2i8(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv1i8_nxv2i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv2i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv16i32(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv16i32(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv1i8_nxv16i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv16i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv2i16(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv2i16(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv1i8_nxv2i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg6_mask_nxv1i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv2i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv4i32(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv4i32(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv1i8_nxv4i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i8.nxv4i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv16i16(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv16i16(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv1i8_nxv16i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i8_nxv16i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv16i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, 
%index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i8(i8*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8(,,,,,,, i8*, , , i32) @@ -3909,11 +600,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -3921,173 +611,15 @@ define @test_vloxseg7_mask_nxv1i8_nxv1i8(i8* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv16i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv16i8(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv1i8_nxv16i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv16i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv16i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv2i32(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv2i32(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv1i8_nxv2i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i8_nxv2i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v 
v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv4i16(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv4i16(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv1i8_nxv4i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv32i16(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv32i16(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv1i8_nxv32i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i8_nxv32i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv32i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i32(i8*, , i32) declare {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32(,,,,,,, i8*, , , i32) @@ -4104,11 +636,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -4116,212 +647,15 @@ define @test_vloxseg7_mask_nxv1i8_nxv1i32(i8* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv8i16(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv8i16(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv1i8_nxv8i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv8i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1i8_nxv8i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv8i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv8i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv8i8(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv1i8_nxv8i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i8_nxv8i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; 
CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv8i32(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv8i32(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv1i8_nxv8i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i8_nxv8i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv8i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv64i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv64i8(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv1i8_nxv64i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i8_nxv64i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv64i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv4i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv4i8(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv1i8_nxv4i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: 
test_vloxseg7_nxv1i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i8_nxv4i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i16(i8*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16(,,,,,,, i8*, , , i32) @@ -4338,11 +672,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -4350,252 +683,15 @@ define @test_vloxseg7_mask_nxv1i8_nxv1i16(i8* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv32i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv32i8(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv1i8_nxv32i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv32i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1i8_nxv32i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.nxv1i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv32i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv2i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv2i8(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv1i8_nxv2i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv16i32(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv16i32(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv1i8_nxv16i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv16i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv2i16(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv2i16(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv1i8_nxv2i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, 
(a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv4i32(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv4i32(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv1i8_nxv4i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i8.nxv4i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv16i16(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv16i16(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv1i8_nxv16i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i8_nxv16i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: 
vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i8(i8*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8(,,,,,,,, i8*, , , i32) @@ -4612,51 +708,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv16i8(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv16i8(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv1i8_nxv16i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -4665,137 +720,15 @@ define @test_vloxseg8_mask_nxv1i8_nxv16i8(i8* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv2i32(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv2i32(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv1i8_nxv2i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv2i32(i8* %base, %index, i32 %vl) 
+ %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv1i8_nxv2i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv4i16(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv4i16(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv1i8_nxv4i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv32i16(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv32i16(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv1i8_nxv32i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i8_nxv32i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, 
v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i32(i8*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32(,,,,,,,, i8*, , , i32) @@ -4812,51 +745,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv8i16(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv8i16(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv1i8_nxv8i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i8_nxv8i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -4865,177 +757,15 @@ define @test_vloxseg8_mask_nxv1i8_nxv8i16(i8* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv8i8(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv8i8(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv1i8_nxv8i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - 
%0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv8i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv1i8_nxv8i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv8i32(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv8i32(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv1i8_nxv8i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i8_nxv8i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv64i8(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv64i8(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv1i8_nxv64i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i8_nxv64i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8 -; 
CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv4i8(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv4i8(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv1i8_nxv4i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i8_nxv4i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i16(i8*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16(,,,,,,,, i8*, , , i32) @@ -5052,211 +782,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv32i8(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv32i8(,,,,,,,, i8*, , , i32) - -define 
@test_vloxseg8_nxv1i8_nxv32i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i8_nxv32i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv2i8(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv2i8(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv1i8_nxv2i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv16i32(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv16i32(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv1i8_nxv16i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg8_mask_nxv1i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv2i16(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv2i16(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv1i8_nxv2i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv4i32(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv4i32(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv1i8_nxv4i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -5265,15 +794,13 @@ define @test_vloxseg8_mask_nxv1i8_nxv4i32(i8* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv1i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv4i32( 
%1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i16(i8*, , i32) @@ -5292,58 +819,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv16i8_nxv16i16(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv16i8_nxv16i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv1i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv1i8(,, i8*, , , i32) - -define @test_vloxseg2_nxv16i8_nxv1i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv1i8(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16( %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv16i8_nxv1i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv1i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i8(i8*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8(,, i8*, , , i32) @@ -5360,432 +849,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv16i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv16i8_nxv16i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv16i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv2i32(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv2i32(,, i8*, , , i32) - -define @test_vloxseg2_nxv16i8_nxv2i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv2i32(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8( %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv16i8_nxv2i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv2i32( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv4i16(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv4i16(,, i8*, , , i32) - -define @test_vloxseg2_nxv16i8_nxv4i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv4i16( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv32i16(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv32i16(,, i8*, , , i32) - -define @test_vloxseg2_nxv16i8_nxv32i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i8_nxv32i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv32i16: -; CHECK: # %bb.0: 
# %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv32i16( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv1i32(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv1i32(,, i8*, , , i32) - -define @test_vloxseg2_nxv16i8_nxv1i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i8_nxv1i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv1i32( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv8i16(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv8i16(,, i8*, , , i32) - -define @test_vloxseg2_nxv16i8_nxv8i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i8_nxv8i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv8i16( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv8i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv8i8(,, i8*, , , i32) - -define @test_vloxseg2_nxv16i8_nxv8i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = 
extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i8_nxv8i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv8i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv8i32(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv8i32(,, i8*, , , i32) - -define @test_vloxseg2_nxv16i8_nxv8i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i8_nxv8i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv8i32( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv64i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv64i8(,, i8*, , , i32) - -define @test_vloxseg2_nxv16i8_nxv64i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i8_nxv64i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv64i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv4i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv4i8(,, i8*, , , i32) - -define @test_vloxseg2_nxv16i8_nxv4i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: 
vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i8_nxv4i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv4i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv1i16(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv1i16(,, i8*, , , i32) - -define @test_vloxseg2_nxv16i8_nxv1i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv1i16( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv32i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv32i8(,, i8*, , , i32) - -define @test_vloxseg2_nxv16i8_nxv32i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i8_nxv32i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv32i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv2i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv2i8(,, i8*, , , i32) - -define 
@test_vloxseg2_nxv16i8_nxv2i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv2i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i32(i8*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32(,, i8*, , , i32) @@ -5802,92 +879,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv16i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv16i8_nxv16i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v16, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16i8.nxv2i16(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv2i16(,, i8*, , , i32) - -define @test_vloxseg2_nxv16i8_nxv2i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv2i16(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32( %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv16i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv2i16( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} 
@llvm.riscv.vloxseg2.nxv16i8.nxv4i32(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv4i32(,, i8*, , , i32) - -define @test_vloxseg2_nxv16i8_nxv4i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv4i32( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i16(i8*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16(,,, i8*, , , i32) @@ -5904,60 +909,21 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv16i8_nxv16i16(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv16i8_nxv16i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv1i8(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv1i8(,,, i8*, , , i32) - -define @test_vloxseg3_nxv16i8_nxv1i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv1i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv16i8_nxv1i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} 
@llvm.riscv.vloxseg3.nxv16i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv1i8( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i8(i8*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8(,,, i8*, , , i32) @@ -5974,445 +940,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv16i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv16i8_nxv16i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 +; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv2i32(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv2i32(,,, i8*, , , i32) - -define @test_vloxseg3_nxv16i8_nxv2i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv2i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv16i8_nxv2i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv2i32( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv4i16(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv4i16(,,, i8*, , , i32) - -define @test_vloxseg3_nxv16i8_nxv4i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv16i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; 
CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv4i16( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv32i16(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv32i16(,,, i8*, , , i32) - -define @test_vloxseg3_nxv16i8_nxv32i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv16i8_nxv32i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv32i16( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv1i32(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv1i32(,,, i8*, , , i32) - -define @test_vloxseg3_nxv16i8_nxv1i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv16i8_nxv1i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv1i32( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv8i16(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv8i16(,,, i8*, , , i32) - -define @test_vloxseg3_nxv16i8_nxv8i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv8i16(i8* %base, %index, i32 %vl) - 
%1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv16i8_nxv8i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv8i16( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv8i8(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv8i8(,,, i8*, , , i32) - -define @test_vloxseg3_nxv16i8_nxv8i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv16i8_nxv8i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv8i8( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv8i32(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv8i32(,,, i8*, , , i32) - -define @test_vloxseg3_nxv16i8_nxv8i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv16i8_nxv8i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv8i32( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv64i8(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv64i8(,,, i8*, , , i32) - -define @test_vloxseg3_nxv16i8_nxv64i8(i8* %base, %index, i32 %vl) { -; 
CHECK-LABEL: test_vloxseg3_nxv16i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv16i8_nxv64i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv64i8( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv4i8(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv4i8(,,, i8*, , , i32) - -define @test_vloxseg3_nxv16i8_nxv4i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv16i8_nxv4i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv4i8( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv1i16(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv1i16(,,, i8*, , , i32) - -define @test_vloxseg3_nxv16i8_nxv1i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv16i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv1i16( %1, 
%1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv32i8(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv32i8(,,, i8*, , , i32) - -define @test_vloxseg3_nxv16i8_nxv32i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv16i8_nxv32i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv32i8( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv2i8(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv2i8(,,, i8*, , , i32) - -define @test_vloxseg3_nxv16i8_nxv2i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv16i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv2i8( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i32(i8*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32(,,, i8*, , , i32) @@ -6429,95 +972,21 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv16i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv16i8_nxv16i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v16, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 
= tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv2i16(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv2i16(,,, i8*, , , i32) - -define @test_vloxseg3_nxv16i8_nxv2i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv2i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv16i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv2i16( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv4i32(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv4i32(,,, i8*, , , i32) - -define @test_vloxseg3_nxv16i8_nxv4i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv16i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv4i32( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i16(i8*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16(,,,, i8*, , , i32) @@ -6534,62 +1003,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv16i8_nxv16i16(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv16i8_nxv16i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v14, v12 -; CHECK-NEXT: vmv2r.v v16, v12 -; 
CHECK-NEXT: vmv2r.v v18, v12 +; CHECK-NEXT: vmv2r.v v16, v8 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v12, v0.t +; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv1i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv1i8(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv16i8_nxv1i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv1i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv16i8_nxv1i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv1i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i8(i8*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8(,,,, i8*, , , i32) @@ -6606,458 +1036,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv16i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv16i8_nxv16i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv2i32(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv2i32(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv16i8_nxv2i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v 
v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv16i8_nxv2i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv2i32( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv4i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv4i16(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv16i8_nxv4i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv16i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv4i16( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv32i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv32i16(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv16i8_nxv32i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv16i8_nxv32i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv32i16( %1, %1, %1, 
%1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv1i32(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv1i32(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv16i8_nxv1i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv16i8_nxv1i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv1i32( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv8i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv8i16(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv16i8_nxv8i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv16i8_nxv8i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv8i16( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv8i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv8i8(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv16i8_nxv8i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv16i8_nxv8i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v 
v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv8i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv8i32(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv8i32(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv16i8_nxv8i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv16i8_nxv8i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv8i32( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv64i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv64i8(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv16i8_nxv64i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv64i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv16i8_nxv64i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv64i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv4i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv4i8(,,,, i8*, , , i32) - 
-define @test_vloxseg4_nxv16i8_nxv4i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv16i8_nxv4i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv4i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv1i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv1i16(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv16i8_nxv1i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv16i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv1i16( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv32i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv32i8(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv16i8_nxv32i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv16i8_nxv32i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v14, v12 -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, 
v14 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv32i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv2i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv2i8(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv16i8_nxv2i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv16i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv2i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i32(i8*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32(,,,, i8*, , , i32) @@ -7074,200 +1069,22 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv16i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv16i8_nxv16i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv2r.v v12, v6 ; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 +; CHECK-NEXT: vloxseg4ei32.v v6, (a0), v16, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv2i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv2i16(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv16i8_nxv2i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv2i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 
= extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv16i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv2i16( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv4i32(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv4i32(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv16i8_nxv4i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv16i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv4i32( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv16i16(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv16i16(,, i32*, , , i32) - -define @test_vloxseg2_nxv2i32_nxv16i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv16i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i32_nxv16i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv16i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv16i16( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv1i8(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv1i8(,, i32*, , , i32) 
- -define @test_vloxseg2_nxv2i32_nxv1i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv1i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i32_nxv1i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv1i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv1i8( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv16i8(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv16i8(,, i32*, , , i32) - -define @test_vloxseg2_nxv2i32_nxv16i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv16i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i32_nxv16i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv16i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv16i8( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i32(i32*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32(,, i32*, , , i32) @@ -7284,364 +1101,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv4i16(i32*, , i32) -declare {,} 
@llvm.riscv.vloxseg2.mask.nxv2i32.nxv4i16(,, i32*, , , i32) - -define @test_vloxseg2_nxv2i32_nxv4i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv4i16(i32* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32( %val, %val, i32* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv2i32_nxv4i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv4i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv4i16( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv32i16(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv32i16(,, i32*, , , i32) - -define @test_vloxseg2_nxv2i32_nxv32i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv32i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i32_nxv32i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv32i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv32i16( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv1i32(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv1i32(,, i32*, , , i32) - -define @test_vloxseg2_nxv2i32_nxv1i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv1i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i32_nxv1i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call 
{,} @llvm.riscv.vloxseg2.nxv2i32.nxv1i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv1i32( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv8i16(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv8i16(,, i32*, , , i32) - -define @test_vloxseg2_nxv2i32_nxv8i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv8i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i32_nxv8i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv8i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv8i16( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv8i8(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv8i8(,, i32*, , , i32) - -define @test_vloxseg2_nxv2i32_nxv8i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv8i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i32_nxv8i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv8i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv8i8( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv8i32(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv8i32(,, i32*, , , i32) - -define @test_vloxseg2_nxv2i32_nxv8i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv8i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i32_nxv8i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, 
(a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv8i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv8i32( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv64i8(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv64i8(,, i32*, , , i32) - -define @test_vloxseg2_nxv2i32_nxv64i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv64i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i32_nxv64i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv64i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv64i8( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv4i8(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv4i8(,, i32*, , , i32) - -define @test_vloxseg2_nxv2i32_nxv4i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv4i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i32_nxv4i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv4i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv4i8( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv1i16(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv1i16(,, i32*, , , i32) - -define @test_vloxseg2_nxv2i32_nxv1i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv1i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define 
@test_vloxseg2_mask_nxv2i32_nxv1i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv1i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv1i16( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv32i8(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv32i8(,, i32*, , , i32) - -define @test_vloxseg2_nxv2i32_nxv32i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv32i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i32_nxv32i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv32i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv32i8( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i8(i32*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8(,, i32*, , , i32) @@ -7658,58 +1131,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv16i32(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv16i32(,, i32*, , , i32) - -define @test_vloxseg2_nxv2i32_nxv16i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv16i32(i32* %base, %index, i32 %vl) + %0 = tail 
call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8( %val, %val, i32* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv2i32_nxv16i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv16i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv16i32( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i16(i32*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16(,, i32*, , , i32) @@ -7726,163 +1161,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i32.nxv4i32(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv4i32(,, i32*, , , i32) - -define @test_vloxseg2_nxv2i32_nxv4i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv4i32(i32* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16( %val, %val, i32* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv2i32_nxv4i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv4i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv4i32( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv16i16(i32*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv16i16(,,, i32*, , , i32) - -define @test_vloxseg3_nxv2i32_nxv16i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv16i16: 
-; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv16i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i32_nxv16i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv16i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv16i16( %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv1i8(i32*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv1i8(,,, i32*, , , i32) - -define @test_vloxseg3_nxv2i32_nxv1i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv1i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i32_nxv1i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv1i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv1i8( %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv16i8(i32*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv16i8(,,, i32*, , , i32) - -define @test_vloxseg3_nxv2i32_nxv16i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv16i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i32_nxv16i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv16i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv16i8( %1, %1, %1, i32* 
%base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i32(i32*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32(,,, i32*, , , i32) @@ -7899,375 +1191,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32( %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv4i16(i32*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv4i16(,,, i32*, , , i32) - -define @test_vloxseg3_nxv2i32_nxv4i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv4i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2i32_nxv4i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv4i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv4i16( %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv32i16(i32*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv32i16(,,, i32*, , , i32) - -define @test_vloxseg3_nxv2i32_nxv32i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv32i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i32_nxv32i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: 
vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv32i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv32i16( %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv1i32(i32*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv1i32(,,, i32*, , , i32) - -define @test_vloxseg3_nxv2i32_nxv1i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv1i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i32_nxv1i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv1i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv1i32( %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv8i16(i32*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv8i16(,,, i32*, , , i32) - -define @test_vloxseg3_nxv2i32_nxv8i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv8i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i32_nxv8i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv8i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv8i16( %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv8i8(i32*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv8i8(,,, i32*, , , i32) - -define @test_vloxseg3_nxv2i32_nxv8i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv8i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i32_nxv8i8(i32* %base, %index, i32 %vl, %mask) 
{
-; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv8i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv8i8( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv8i32(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv8i32(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv2i32_nxv8i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv8i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv2i32_nxv8i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv8i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv8i32( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv64i8(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv64i8(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv2i32_nxv64i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv64i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv2i32_nxv64i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv64i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv64i8( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv4i8(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv4i8(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv2i32_nxv4i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv4i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv2i32_nxv4i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv4i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv4i8( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv1i16(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv1i16(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv2i32_nxv1i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv1i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv2i32_nxv1i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv1i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv1i16( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv32i8(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv32i8(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv2i32_nxv32i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv32i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv2i32_nxv32i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv32i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv32i8( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
 declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i8(i32*, , i32)
 declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8(,,, i32*, , , i32)
@@ -8284,60 +1223,22 @@ entry:
 ret %1
 }
-define @test_vloxseg3_mask_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg3_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv16i32(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv16i32(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv2i32_nxv16i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv16i32(i32* %base, %index, i32 %vl)
+ %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,} %0, 1
 ret %1
 }
-define @test_vloxseg3_mask_nxv2i32_nxv16i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv16i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv16i32( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
 declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i16(i32*, , i32)
 declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16(,,, i32*, , , i32)
@@ -8354,168 +1255,22 @@ entry:
 ret %1
 }
-define @test_vloxseg3_mask_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg3_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv4i32(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv4i32(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv2i32_nxv4i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv4i32(i32* %base, %index, i32 %vl)
+ %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,} %0, 1
 ret %1
 }
-define @test_vloxseg3_mask_nxv2i32_nxv4i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv4i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv4i32( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv16i16(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv16i16(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv2i32_nxv16i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv16i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv2i32_nxv16i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv16i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv16i16( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv1i8(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv1i8(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv2i32_nxv1i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv1i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv2i32_nxv1i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv1i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv1i8( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv16i8(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv16i8(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv2i32_nxv16i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv16i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv2i32_nxv16i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv16i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv16i8( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
 declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i32(i32*, , i32)
 declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32(,,,, i32*, , , i32)
@@ -8532,386 +1287,23 @@ entry:
 ret %1
 }
-define @test_vloxseg4_mask_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg4_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv4i16(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv4i16(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv2i32_nxv4i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv4i16(i32* %base, %index, i32 %vl)
+ %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,} %0, 1
 ret %1
 }
-define @test_vloxseg4_mask_nxv2i32_nxv4i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv4i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv4i16( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv32i16(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv32i16(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv2i32_nxv32i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv32i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv2i32_nxv32i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv32i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv32i16( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv1i32(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv1i32(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv2i32_nxv1i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv1i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv2i32_nxv1i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv1i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv1i32( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv8i16(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv8i16(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv2i32_nxv8i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv8i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv2i32_nxv8i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv8i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv8i16( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv8i8(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv8i8(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv2i32_nxv8i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv8i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv2i32_nxv8i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv8i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv8i8( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv8i32(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv8i32(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv2i32_nxv8i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv8i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv2i32_nxv8i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv8i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv8i32( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv64i8(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv64i8(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv2i32_nxv64i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv64i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv2i32_nxv64i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv64i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv64i8( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv4i8(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv4i8(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv2i32_nxv4i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv4i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv2i32_nxv4i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv4i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv4i8( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv1i16(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv1i16(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv2i32_nxv1i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv1i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv2i32_nxv1i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv1i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv1i16( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv32i8(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv32i8(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv2i32_nxv32i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv32i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv2i32_nxv32i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv32i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv32i8( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
 declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i8(i32*, , i32)
 declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8(,,,, i32*, , , i32)
@@ -8928,62 +1320,23 @@ entry:
 ret %1
 }
-define @test_vloxseg4_mask_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg4_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv16i32(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv16i32(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv2i32_nxv16i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv16i32(i32* %base, %index, i32 %vl)
+ %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,} %0, 1
 ret %1
 }
-define @test_vloxseg4_mask_nxv2i32_nxv16i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv16i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv16i32( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
 declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i16(i32*, , i32)
 declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16(,,,, i32*, , , i32)
@@ -9000,173 +1353,23 @@ entry:
 ret %1
 }
-define @test_vloxseg4_mask_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg4_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv4i32(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv4i32(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv2i32_nxv4i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv4i32(i32* %base, %index, i32 %vl)
+ %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,} %0, 1
 ret %1
 }
-define @test_vloxseg4_mask_nxv2i32_nxv4i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv4i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv4i32( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv16i16(i32*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv16i16(,,,,, i32*, , , i32)
-
-define @test_vloxseg5_nxv2i32_nxv16i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv16i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2i32_nxv16i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv16i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv16i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv1i8(i32*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv1i8(,,,,, i32*, , , i32)
-
-define @test_vloxseg5_nxv2i32_nxv1i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv1i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2i32_nxv1i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv1i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv1i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv16i8(i32*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv16i8(,,,,, i32*, , , i32)
-
-define @test_vloxseg5_nxv2i32_nxv16i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv16i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2i32_nxv16i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv16i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv16i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
 declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i32(i32*, , i32)
 declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32(,,,,, i32*, , , i32)
@@ -9183,397 +1386,24 @@ entry:
 ret %1
 }
-define @test_vloxseg5_mask_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg5_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vmv1r.v v5, v1
 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv4i16(i32*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv4i16(,,,,, i32*, , , i32)
-
-define @test_vloxseg5_nxv2i32_nxv4i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv4i16(i32* %base, %index, i32 %vl)
+ %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,,} %0, 1
 ret %1
 }
-define @test_vloxseg5_mask_nxv2i32_nxv4i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv4i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv4i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv32i16(i32*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv32i16(,,,,, i32*, , , i32)
-
-define @test_vloxseg5_nxv2i32_nxv32i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv32i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2i32_nxv32i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv32i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv32i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv1i32(i32*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv1i32(,,,,, i32*, , , i32)
-
-define @test_vloxseg5_nxv2i32_nxv1i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv1i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2i32_nxv1i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv1i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv1i32( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv8i16(i32*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv8i16(,,,,, i32*, , , i32)
-
-define @test_vloxseg5_nxv2i32_nxv8i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv8i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2i32_nxv8i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv8i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv8i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv8i8(i32*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv8i8(,,,,, i32*, , , i32)
-
-define @test_vloxseg5_nxv2i32_nxv8i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv8i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2i32_nxv8i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv8i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv8i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv8i32(i32*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv8i32(,,,,, i32*, , , i32)
-
-define @test_vloxseg5_nxv2i32_nxv8i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv8i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2i32_nxv8i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv8i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv8i32( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv64i8(i32*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv64i8(,,,,, i32*, , , i32)
-
-define @test_vloxseg5_nxv2i32_nxv64i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv64i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2i32_nxv64i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv64i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv64i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv4i8(i32*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv4i8(,,,,, i32*, , , i32)
-
-define @test_vloxseg5_nxv2i32_nxv4i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv4i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2i32_nxv4i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv4i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv4i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv1i16(i32*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv1i16(,,,,, i32*, , , i32)
-
-define @test_vloxseg5_nxv2i32_nxv1i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv1i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2i32_nxv1i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv1i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv1i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv32i8(i32*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv32i8(,,,,, i32*, , , i32)
-
-define @test_vloxseg5_nxv2i32_nxv32i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv32i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2i32_nxv32i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv32i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv32i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
 declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i8(i32*, , i32)
 declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8(,,,,, i32*, , , i32)
@@ -9590,64 +1420,24 @@ entry:
 ret %1
 }
-define @test_vloxseg5_mask_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg5_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vmv1r.v v5, v1
 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv16i32(i32*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv16i32(,,,,, i32*, , , i32)
-
-define @test_vloxseg5_nxv2i32_nxv16i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv16i32(i32* %base, %index, i32 %vl)
+ %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,,} %0, 1
 ret %1
 }
-define @test_vloxseg5_mask_nxv2i32_nxv16i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv16i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv16i32( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
 declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i16(i32*, , i32)
 declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16(,,,,, i32*, , , i32)
@@ -9664,178 +1454,24 @@ entry:
 ret %1
 }
-define @test_vloxseg5_mask_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg5_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vmv1r.v v5, v1
 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv4i32(i32*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv4i32(,,,,, i32*, , , i32)
-
-define @test_vloxseg5_nxv2i32_nxv4i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv4i32(i32* %base, %index, i32 %vl)
+ %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,,} %0, 1
 ret %1
 }
-define @test_vloxseg5_mask_nxv2i32_nxv4i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv4i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i32.nxv4i32( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv16i16(i32*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv16i16(,,,,,, i32*, , , i32)
-
-define @test_vloxseg6_nxv2i32_nxv16i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv16i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2i32_nxv16i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv16i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv16i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv1i8(i32*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv1i8(,,,,,, i32*, , , i32)
-
-define @test_vloxseg6_nxv2i32_nxv1i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv1i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2i32_nxv1i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv1i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv1i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv16i8(i32*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv16i8(,,,,,, i32*, , , i32)
-
-define @test_vloxseg6_nxv2i32_nxv16i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv16i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2i32_nxv16i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv16i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv16i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
 declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i32(i32*, , i32)
 declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32(,,,,,, i32*, , , i32)
@@ -9852,408 +1488,25 @@ entry:
 ret %1
 }
-define @test_vloxseg6_mask_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg6_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vmv1r.v v5, v1
 ; CHECK-NEXT: vmv1r.v v6, v1
 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv4i16(i32*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv4i16(,,,,,, i32*, , , i32)
-
-define @test_vloxseg6_nxv2i32_nxv4i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv4i16(i32* %base, %index, i32 %vl)
+ %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,,,} %0, 1
 ret %1
 }
-define @test_vloxseg6_mask_nxv2i32_nxv4i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv4i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv4i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv32i16(i32*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv32i16(,,,,,, i32*, , , i32)
-
-define @test_vloxseg6_nxv2i32_nxv32i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv32i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2i32_nxv32i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv32i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv32i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv1i32(i32*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv1i32(,,,,,, i32*, , , i32)
-
-define @test_vloxseg6_nxv2i32_nxv1i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv1i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2i32_nxv1i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv1i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv1i32( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv8i16(i32*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv8i16(,,,,,, i32*, , , i32)
-
-define @test_vloxseg6_nxv2i32_nxv8i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv8i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2i32_nxv8i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv8i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv8i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv8i8(i32*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv8i8(,,,,,, i32*, , , i32)
-
-define @test_vloxseg6_nxv2i32_nxv8i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv8i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2i32_nxv8i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv8i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv8i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv8i32(i32*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv8i32(,,,,,, i32*, , , i32)
-
-define @test_vloxseg6_nxv2i32_nxv8i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv8i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2i32_nxv8i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv8i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,}
@llvm.riscv.vloxseg6.mask.nxv2i32.nxv8i32( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv64i8(i32*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv64i8(,,,,,, i32*, , , i32) - -define @test_vloxseg6_nxv2i32_nxv64i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv64i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i32_nxv64i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv64i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv64i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv4i8(i32*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv4i8(,,,,,, i32*, , , i32) - -define @test_vloxseg6_nxv2i32_nxv4i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv4i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i32_nxv4i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv4i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv4i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv1i16(i32*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv1i16(,,,,,, i32*, , , i32) - -define @test_vloxseg6_nxv2i32_nxv1i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv1i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 
1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i32_nxv1i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv1i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv1i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv32i8(i32*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv32i8(,,,,,, i32*, , , i32) - -define @test_vloxseg6_nxv2i32_nxv32i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv32i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i32_nxv32i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv32i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv32i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i8(i32*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8(,,,,,, i32*, , , i32) @@ -10270,66 +1523,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv16i32(i32*, , i32) -declare 
{,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv16i32(,,,,,, i32*, , , i32) - -define @test_vloxseg6_nxv2i32_nxv16i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv16i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv2i32_nxv16i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv16i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv16i32( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i16(i32*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16(,,,,,, i32*, , , i32) @@ -10346,183 +1558,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv4i32(i32*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv4i32(,,,,,, i32*, , , i32) - -define @test_vloxseg6_nxv2i32_nxv4i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv4i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv2i32_nxv4i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv4i32: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv4i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i32.nxv4i32( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv16i16(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv16i16(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv2i32_nxv16i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv16i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i32_nxv16i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv16i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv16i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv1i8(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv1i8(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv2i32_nxv1i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv1i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i32_nxv1i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv1i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i32* 
%base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv16i8(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv16i8(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv2i32_nxv16i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv16i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i32_nxv16i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv16i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv16i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i32(i32*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32(,,,,,,, i32*, , , i32) @@ -10539,11 +1593,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -10551,407 +1604,15 @@ define @test_vloxseg7_mask_nxv2i32_nxv2i32(i32* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv4i16(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv4i16(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv2i32_nxv4i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv4i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv2i32_nxv4i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: 
vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv4i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv32i16(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv32i16(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv2i32_nxv32i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv32i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i32_nxv32i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv32i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv32i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv1i32(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv1i32(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv2i32_nxv1i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv1i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i32_nxv1i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv1i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} 
%2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv8i16(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv8i16(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv2i32_nxv8i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv8i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i32_nxv8i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv8i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv8i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv8i8(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv8i8(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv2i32_nxv8i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv8i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i32_nxv8i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv8i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv8i32(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv8i32(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv2i32_nxv8i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv8i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define 
@test_vloxseg7_mask_nxv2i32_nxv8i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv8i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv8i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv64i8(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv64i8(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv2i32_nxv64i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv64i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i32_nxv64i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv64i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv64i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv4i8(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv4i8(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv2i32_nxv4i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv4i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i32_nxv4i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.nxv2i32.nxv4i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv1i16(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv1i16(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv2i32_nxv1i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv1i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i32_nxv1i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv1i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv32i8(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv32i8(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv2i32_nxv32i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv32i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i32_nxv32i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv32i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv32i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i8(i32*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8(,,,,,,, i32*, , , i32) @@ -10968,11 +1629,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv2i32_nxv2i8( %val, i32* %base, 
%index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -10980,56 +1640,15 @@ define @test_vloxseg7_mask_nxv2i32_nxv2i8(i32* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv16i32(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv16i32(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv2i32_nxv16i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv16i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv2i32_nxv16i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv16i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv16i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i16(i32*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16(,,,,,,, i32*, , , i32) @@ -11046,11 +1665,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -11058,176 +1676,15 @@ define @test_vloxseg7_mask_nxv2i32_nxv2i16(i32* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv4i32(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv4i32(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv2i32_nxv4i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: 
test_vloxseg7_nxv2i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv4i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv2i32_nxv4i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv4i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i32.nxv4i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv16i16(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv16i16(,,,,,,,, i32*, , , i32) - -define @test_vloxseg8_nxv2i32_nxv16i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv16i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i32_nxv16i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv16i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv1i8(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv1i8(,,,,,,,, i32*, , , i32) - -define @test_vloxseg8_nxv2i32_nxv1i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv1i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i32_nxv1i8(i32* %base, 
%index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv1i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv16i8(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv16i8(,,,,,,,, i32*, , , i32) - -define @test_vloxseg8_nxv2i32_nxv16i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv16i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i32_nxv16i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv16i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i32(i32*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32(,,,,,,,, i32*, , , i32) @@ -11244,171 +1701,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, 
%mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv4i16(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv4i16(,,,,,,,, i32*, , , i32) - -define @test_vloxseg8_nxv2i32_nxv4i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv4i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i32_nxv4i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv4i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv32i16(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv32i16(,,,,,,,, i32*, , , i32) - -define @test_vloxseg8_nxv2i32_nxv32i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv32i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i32_nxv32i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv32i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv1i32(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv1i32(,,,,,,,, i32*, , , i32) - -define @test_vloxseg8_nxv2i32_nxv1i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; 
CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv1i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i32_nxv1i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv1i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv8i16(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv8i16(,,,,,,,, i32*, , , i32) - -define @test_vloxseg8_nxv2i32_nxv8i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv8i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i32_nxv8i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -11417,257 +1713,15 @@ define @test_vloxseg8_mask_nxv2i32_nxv8i16(i32* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv8i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv8i8(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv8i8(,,,,,,,, i32*, , , i32) - -define @test_vloxseg8_nxv2i32_nxv8i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv8i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv2i32_nxv8i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; 
CHECK-NEXT: vmv1r.v v14, v9
-; CHECK-NEXT: vmv1r.v v15, v9
-; CHECK-NEXT: vmv1r.v v16, v9
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv8i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv8i32(i32*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv8i32(,,,,,,,, i32*, , , i32)
-
-define @test_vloxseg8_nxv2i32_nxv8i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv8i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv2i32_nxv8i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv8i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv64i8(i32*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv64i8(,,,,,,,, i32*, , , i32)
-
-define @test_vloxseg8_nxv2i32_nxv64i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv64i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv2i32_nxv64i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v17
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv64i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv4i8(i32*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv4i8(,,,,,,,, i32*, , , i32)
-
-define @test_vloxseg8_nxv2i32_nxv4i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv4i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv2i32_nxv4i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v11, v9
-; CHECK-NEXT: vmv1r.v v12, v9
-; CHECK-NEXT: vmv1r.v v13, v9
-; CHECK-NEXT: vmv1r.v v14, v9
-; CHECK-NEXT: vmv1r.v v15, v9
-; CHECK-NEXT: vmv1r.v v16, v9
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv4i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv1i16(i32*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv1i16(,,,,,,,, i32*, , , i32)
-
-define @test_vloxseg8_nxv2i32_nxv1i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv1i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv2i32_nxv1i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v11, v9
-; CHECK-NEXT: vmv1r.v v12, v9
-; CHECK-NEXT: vmv1r.v v13, v9
-; CHECK-NEXT: vmv1r.v v14, v9
-; CHECK-NEXT: vmv1r.v v15, v9
-; CHECK-NEXT: vmv1r.v v16, v9
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv1i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv32i8(i32*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv32i8(,,,,,,,, i32*, , , i32)
-
-define @test_vloxseg8_nxv2i32_nxv32i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv32i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv2i32_nxv32i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv32i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
 declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i8(i32*, , i32)
 declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8(,,,,,,,, i32*, , , i32)

@@ -11684,70 +1738,27 @@ entry:
 ret %1
 }

-define @test_vloxseg8_mask_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg8_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v11, v9
-; CHECK-NEXT: vmv1r.v v12, v9
-; CHECK-NEXT: vmv1r.v v13, v9
-; CHECK-NEXT: vmv1r.v v14, v9
-; CHECK-NEXT: vmv1r.v v15, v9
-; CHECK-NEXT: vmv1r.v v16, v9
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v10
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv16i32(i32*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv16i32(,,,,,,,, i32*, , , i32)
-
-define @test_vloxseg8_nxv2i32_nxv16i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv16i32(i32* %base, %index, i32 %vl)
+ %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,,,,,} %0, 1
 ret %1
 }

-define @test_vloxseg8_mask_nxv2i32_nxv16i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v17
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv16i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
 declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i16(i32*, , i32)
 declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16(,,,,,,,, i32*, , , i32)

@@ -11764,51 +1775,10 @@ entry:
 ret %1
 }

-define @test_vloxseg8_mask_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg8_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v11, v9
-; CHECK-NEXT: vmv1r.v v12, v9
-; CHECK-NEXT: vmv1r.v v13, v9
-; CHECK-NEXT: vmv1r.v v14, v9
-; CHECK-NEXT: vmv1r.v v15, v9
-; CHECK-NEXT: vmv1r.v v16, v9
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv4i32(i32*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv4i32(,,,,,,,, i32*, , , i32)
-
-define @test_vloxseg8_nxv2i32_nxv4i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv4i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv2i32_nxv4i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v10, v8
 ; CHECK-NEXT: vmv1r.v v11, v10
 ; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vmv1r.v v13, v10
@@ -11817,151 +1787,13 @@ define @test_vloxseg8_mask_nxv2i32_nxv4i32(i32* %base,
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv4i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv16i16(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv16i16(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv4i16_nxv16i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv16i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4i16_nxv16i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv16i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv16i16( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv1i8(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv1i8(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv4i16_nxv1i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv1i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4i16_nxv1i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv1i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv1i8( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv16i8(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv16i8(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv4i16_nxv16i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv16i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4i16_nxv16i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv16i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv16i8( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv2i32(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv2i32(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv4i16_nxv2i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv2i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4i16_nxv2i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv2i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv2i32( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
+ %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 1
+ ret %1
 }

 declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i16(i16*, , i32)
 declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16(,, i16*, , , i32)

@@ -11980,228 +1812,20 @@ entry:
 ret %1
 }

-define @test_vloxseg2_mask_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg2_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v7, v8
 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv32i16(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv32i16(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv4i16_nxv32i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv32i16(i16* %base, %index, i32 %vl)
+ %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16( %val, %val, i16* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,} %0, 1
 ret %1
 }

-define @test_vloxseg2_mask_nxv4i16_nxv32i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv32i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv32i16( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv1i32(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv1i32(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv4i16_nxv1i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv1i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4i16_nxv1i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv1i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv1i32( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv8i16(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv8i16(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv4i16_nxv8i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv8i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4i16_nxv8i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv8i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv8i16( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv8i8(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv8i8(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv4i16_nxv8i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv8i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4i16_nxv8i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv8i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv8i8( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv8i32(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv8i32(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv4i16_nxv8i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv8i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4i16_nxv8i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv8i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv8i32( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv64i8(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv64i8(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv4i16_nxv64i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv64i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4i16_nxv64i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv64i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv64i8( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
 declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i8(i16*, , i32)
 declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8(,, i16*, , , i32)

@@ -12218,194 +1842,20 @@ entry:
 ret %1
 }

-define @test_vloxseg2_mask_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg2_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v7, v8
 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv1i16(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv1i16(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv4i16_nxv1i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv1i16(i16* %base, %index, i32 %vl)
+ %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8( %val, %val, i16* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,} %0, 1
 ret %1
 }

-define @test_vloxseg2_mask_nxv4i16_nxv1i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv1i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv1i16( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv32i8(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv32i8(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv4i16_nxv32i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv32i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4i16_nxv32i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv32i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv32i8( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv2i8(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv2i8(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv4i16_nxv2i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv2i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4i16_nxv2i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv2i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv2i8( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv16i32(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv16i32(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv4i16_nxv16i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv16i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4i16_nxv16i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv16i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv16i32( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv2i16(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv2i16(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv4i16_nxv2i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv2i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4i16_nxv2i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv2i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv2i16( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
 declare {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i32(i16*, , i32)
 declare {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32(,, i16*, , , i32)

@@ -12422,164 +1872,20 @@ entry:
 ret %1
 }

-define @test_vloxseg2_mask_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg2_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv4i16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v7, v8
 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv16i16(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv16i16(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv4i16_nxv16i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv16i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
+ %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32( %val, %val, i16* %base, %index, %mask, i32 %vl)
+ %1 = extractvalue {,} %0, 1
 ret %1
 }

-define @test_vloxseg3_mask_nxv4i16_nxv16i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv16i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv16i16( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv1i8(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv1i8(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv4i16_nxv1i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv1i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4i16_nxv1i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv1i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv1i8( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv16i8(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv16i8(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv4i16_nxv16i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv16i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4i16_nxv16i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv16i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv16i8( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv2i32(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv2i32(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv4i16_nxv2i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv2i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4i16_nxv2i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv2i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv2i32( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
 declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i16(i16*, , i32)
 declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16(,,, i16*, , , i32)

@@ -12596,235 +1902,22 @@ entry:
 ret %1
 }

-define @test_vloxseg3_mask_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg3_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv32i16(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv32i16(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv4i16_nxv32i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv32i16(i16* %base, %index, i32 %vl)
+ %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,} %0, 1
 ret %1
 }

-define @test_vloxseg3_mask_nxv4i16_nxv32i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv32i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv32i16( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv1i32(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv1i32(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv4i16_nxv1i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv1i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4i16_nxv1i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv1i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv1i32( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv8i16(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv8i16(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv4i16_nxv8i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv8i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4i16_nxv8i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv8i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv8i16( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv8i8(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv8i8(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv4i16_nxv8i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv8i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4i16_nxv8i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv8i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv8i8( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv8i32(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv8i32(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv4i16_nxv8i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv8i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4i16_nxv8i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv8i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv8i32( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv64i8(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv64i8(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv4i16_nxv64i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv64i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4i16_nxv64i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv64i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv64i8( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
 declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i8(i16*, , i32)
 declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8(,,, i16*, , , i32)

@@ -12841,200 +1934,22 @@ entry:
 ret %1
 }

-define @test_vloxseg3_mask_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg3_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv1i16(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv1i16(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv4i16_nxv1i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv1i16(i16* %base, %index, i32 %vl)
+ %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,} %0, 1
 ret %1
 }

-define @test_vloxseg3_mask_nxv4i16_nxv1i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv1i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv1i16( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv32i8(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv32i8(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv4i16_nxv32i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv32i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4i16_nxv32i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv32i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv32i8( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv2i8(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv2i8(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv4i16_nxv2i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv2i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4i16_nxv2i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv2i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv2i8( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv16i32(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv16i32(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv4i16_nxv16i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv16i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4i16_nxv16i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv16i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv16i32( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv2i16(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv2i16(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv4i16_nxv2i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv2i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4i16_nxv2i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv2i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv2i16( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
 declare {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i32(i16*, , i32)
 declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32(,,, i16*, , , i32)

@@ -13051,169 +1966,21 @@ entry:
 ret %1
 }

-define @test_vloxseg3_mask_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg3_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v7
 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv16i16(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv16i16(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv4i16_nxv16i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv16i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
+ %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+ %1 = extractvalue {,,} %0, 1
 ret %1
 }

-define @test_vloxseg4_mask_nxv4i16_nxv16i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv16i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv16i16( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv1i8(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv1i8(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv4i16_nxv1i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv1i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4i16_nxv1i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv1i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv1i8( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv16i8(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv16i8(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv4i16_nxv16i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv16i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4i16_nxv16i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv16i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv16i8( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv2i32(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv2i32(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv4i16_nxv2i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv2i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4i16_nxv2i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv2i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv2i32( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
 declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i16(i16*, , i32)
 declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16(,,,, i16*, , , i32)

@@ -13230,242 +1997,23 @@ entry:
 ret %1
 }

-define @test_vloxseg4_mask_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg4_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv32i16(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv32i16(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv4i16_nxv32i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv32i16(i16* %base, %index, i32 %vl)
+ %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,} %0, 1
 ret %1
 }

-define @test_vloxseg4_mask_nxv4i16_nxv32i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv32i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv32i16( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv1i32(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv1i32(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv4i16_nxv1i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv1i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4i16_nxv1i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv1i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv1i32( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv8i16(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv8i16(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv4i16_nxv8i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv8i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4i16_nxv8i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv8i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv8i16( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv8i8(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv8i8(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv4i16_nxv8i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv8i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4i16_nxv8i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv8i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv8i8( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv8i32(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv8i32(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv4i16_nxv8i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv8i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4i16_nxv8i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv8i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv8i32( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv64i8(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv64i8(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv4i16_nxv64i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv64i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4i16_nxv64i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv64i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv64i8( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
 declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i8(i16*, , i32)
 declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8(,,,, i16*, , , i32)
@@ -13482,206 +2030,23 @@ entry:
 ret %1
 }
-define @test_vloxseg4_mask_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg4_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv1i16(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv1i16(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv4i16_nxv1i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv1i16(i16* %base, %index, i32 %vl)
+ %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,} %0, 1
 ret %1
 }
-define @test_vloxseg4_mask_nxv4i16_nxv1i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv1i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv1i16( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv32i8(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv32i8(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv4i16_nxv32i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv32i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4i16_nxv32i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv32i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv32i8( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv2i8(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv2i8(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv4i16_nxv2i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv2i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4i16_nxv2i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv2i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv2i8( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv16i32(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv16i32(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv4i16_nxv16i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv16i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4i16_nxv16i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv16i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv16i32( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv2i16(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv2i16(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv4i16_nxv2i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv2i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4i16_nxv2i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv2i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv2i16( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
 declare {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i32(i16*, , i32)
 declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32(,,,, i16*, , , i32)
@@ -13698,174 +2063,23 @@ entry:
 ret %1
 }
-define @test_vloxseg4_mask_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg4_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v10, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv16i16(i16*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv16i16(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv4i16_nxv16i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv16i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
+ %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+ %1 = extractvalue {,,,} %0, 1
 ret %1
 }
-define @test_vloxseg5_mask_nxv4i16_nxv16i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv16i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv16i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv1i8(i16*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv1i8(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv4i16_nxv1i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv1i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv4i16_nxv1i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv1i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv1i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv16i8(i16*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv16i8(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv4i16_nxv16i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv16i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv4i16_nxv16i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv16i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv16i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv2i32(i16*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv2i32(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv4i16_nxv2i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv2i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv4i16_nxv2i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv2i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv2i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
 declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i16(i16*, , i32)
 declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16(,,,,, i16*, , , i32)
@@ -13882,249 +2096,24 @@ entry:
 ret %1
 }
-define @test_vloxseg5_mask_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg5_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vmv1r.v v5, v1
 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv32i16(i16*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv32i16(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv4i16_nxv32i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv32i16(i16* %base, %index, i32 %vl)
+ %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,,} %0, 1
 ret %1
 }
-define @test_vloxseg5_mask_nxv4i16_nxv32i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv32i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv32i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv1i32(i16*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv1i32(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv4i16_nxv1i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv1i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv4i16_nxv1i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv1i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv1i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv8i16(i16*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv8i16(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv4i16_nxv8i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv8i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv4i16_nxv8i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv8i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv8i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv8i8(i16*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv8i8(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv4i16_nxv8i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv8i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv4i16_nxv8i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv8i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv8i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv8i32(i16*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv8i32(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv4i16_nxv8i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv8i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv4i16_nxv8i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv8i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv8i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv64i8(i16*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv64i8(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv4i16_nxv64i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv64i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv4i16_nxv64i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv64i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv64i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
 declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i8(i16*, , i32)
 declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8(,,,,, i16*, , , i32)
@@ -14141,212 +2130,24 @@ entry:
 ret %1
 }
-define @test_vloxseg5_mask_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg5_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vmv1r.v v5, v1
 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv1i16(i16*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv1i16(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv4i16_nxv1i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv1i16(i16* %base, %index, i32 %vl)
+ %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,,} %0, 1
 ret %1
 }
-define @test_vloxseg5_mask_nxv4i16_nxv1i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv1i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv1i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv32i8(i16*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv32i8(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv4i16_nxv32i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv32i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv4i16_nxv32i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv32i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv32i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv2i8(i16*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv2i8(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv4i16_nxv2i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv2i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv4i16_nxv2i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv2i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv2i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv16i32(i16*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv16i32(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv4i16_nxv16i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv16i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv4i16_nxv16i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv16i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv16i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv2i16(i16*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv2i16(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv4i16_nxv2i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv2i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv4i16_nxv2i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv2i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv2i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
 declare {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i32(i16*, , i32)
 declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32(,,,,, i16*, , , i32)
@@ -14363,179 +2164,24 @@ entry:
 ret %1
 }
-define @test_vloxseg5_mask_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg5_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vmv1r.v v5, v1
 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v10, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv16i16(i16*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv16i16(,,,,,, i16*, , , i32)
-
-define @test_vloxseg6_nxv4i16_nxv16i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv16i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
+ %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+ %1 = extractvalue {,,,,} %0, 1
 ret %1
 }
-define @test_vloxseg6_mask_nxv4i16_nxv16i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv16i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv16i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv1i8(i16*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv1i8(,,,,,, i16*, , , i32)
-
-define @test_vloxseg6_nxv4i16_nxv1i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv1i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv4i16_nxv1i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv1i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv1i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv16i8(i16*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv16i8(,,,,,, i16*, , , i32)
-
-define @test_vloxseg6_nxv4i16_nxv16i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv16i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv4i16_nxv16i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv16i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv16i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv2i32(i16*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv2i32(,,,,,, i16*, , , i32)
-
-define @test_vloxseg6_nxv4i16_nxv2i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv2i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv4i16_nxv2i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv2i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv2i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
 declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i16(i16*, , i32)
 declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16(,,,,,, i16*, , , i32)
@@ -14552,256 +2198,25 @@ entry:
 ret %1
 }
-define @test_vloxseg6_mask_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg6_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vmv1r.v v5, v1
 ; CHECK-NEXT: vmv1r.v v6, v1
 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv32i16(i16*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv32i16(,,,,,, i16*, , , i32)
-
-define @test_vloxseg6_nxv4i16_nxv32i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv32i16(i16* %base, %index, i32 %vl)
+ %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,,,} %0, 1
 ret %1
 }
-define @test_vloxseg6_mask_nxv4i16_nxv32i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv32i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv32i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv1i32(i16*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv1i32(,,,,,, i16*, , , i32)
-
-define @test_vloxseg6_nxv4i16_nxv1i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv1i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv4i16_nxv1i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv1i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv1i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv8i16(i16*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv8i16(,,,,,, i16*, , , i32)
-
-define @test_vloxseg6_nxv4i16_nxv8i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv8i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv4i16_nxv8i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv8i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv8i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv8i8(i16*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv8i8(,,,,,, i16*, , , i32)
-
-define @test_vloxseg6_nxv4i16_nxv8i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv8i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv4i16_nxv8i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv8i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv8i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv8i32(i16*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv8i32(,,,,,, i16*, , , i32)
-
-define @test_vloxseg6_nxv4i16_nxv8i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv8i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv4i16_nxv8i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv8i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv8i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv64i8(i16*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv64i8(,,,,,, i16*, , , i32)
-
-define @test_vloxseg6_nxv4i16_nxv64i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv64i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv4i16_nxv64i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv64i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv64i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
 declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i8(i16*, , i32)
 declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8(,,,,,, i16*, , , i32)
@@ -14818,218 +2233,25 @@ entry:
 ret %1
 }
-define @test_vloxseg6_mask_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg6_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vmv1r.v v5, v1
 ; CHECK-NEXT: vmv1r.v v6, v1
 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv1i16(i16*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv1i16(,,,,,, i16*, , , i32)
-
-define @test_vloxseg6_nxv4i16_nxv1i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv1i16(i16* %base, %index, i32 %vl)
+ %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,,,} %0, 1
 ret %1
 }
-define @test_vloxseg6_mask_nxv4i16_nxv1i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv1i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv1i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv32i8(i16*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv32i8(,,,,,, i16*, , , i32)
-
-define @test_vloxseg6_nxv4i16_nxv32i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv32i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv4i16_nxv32i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv32i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv32i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv2i8(i16*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv2i8(,,,,,, i16*, , , i32)
-
-define @test_vloxseg6_nxv4i16_nxv2i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv2i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv4i16_nxv2i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv2i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv2i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv16i32(i16*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv16i32(,,,,,, i16*, , , i32)
-
-define @test_vloxseg6_nxv4i16_nxv16i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv16i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv4i16_nxv16i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv16i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv16i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv2i16(i16*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv2i16(,,,,,, i16*, , , i32)
-
-define @test_vloxseg6_nxv4i16_nxv2i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv2i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue
{,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i16_nxv2i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv2i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv2i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i32(i16*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32(,,,,,, i16*, , , i32) @@ -15046,184 +2268,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv16i16(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv16i16(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv4i16_nxv16i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv16i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv4i16_nxv16i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv16i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail 
call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv1i8(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv1i8(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv4i16_nxv1i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv1i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i16_nxv1i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv1i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv16i8(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv16i8(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv4i16_nxv16i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv16i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i16_nxv16i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv16i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv2i32(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv2i32(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv4i16_nxv2i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail 
call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv2i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i16_nxv2i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv2i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i16(i16*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16(,,,,,,, i16*, , , i32) @@ -15240,11 +2303,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -15252,251 +2314,15 @@ define @test_vloxseg7_mask_nxv4i16_nxv4i16(i16* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv32i16(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv32i16(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv4i16_nxv32i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv32i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv4i16_nxv32i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv32i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv32i16( %1, %1, %1, %1, 
%1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv1i32(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv1i32(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv4i16_nxv1i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv1i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i16_nxv1i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv1i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv8i16(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv8i16(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv4i16_nxv8i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv8i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i16_nxv8i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv8i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv8i8(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv8i8(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv4i16_nxv8i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv8i8(i16* %base, 
%index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i16_nxv8i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv8i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv8i32(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv8i32(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv4i16_nxv8i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv8i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i16_nxv8i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv8i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv64i8(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv64i8(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv4i16_nxv64i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv64i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i16_nxv64i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; 
CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv64i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i8(i16*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8(,,,,,,, i16*, , , i32) @@ -15513,11 +2339,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -15525,212 +2350,15 @@ define @test_vloxseg7_mask_nxv4i16_nxv4i8(i16* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv1i16(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv1i16(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv4i16_nxv1i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv1i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv4i16_nxv1i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv1i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv32i8(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv32i8(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv4i16_nxv32i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv32i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define 
@test_vloxseg7_mask_nxv4i16_nxv32i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv32i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv2i8(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv2i8(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv4i16_nxv2i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv2i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i16_nxv2i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv2i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv16i32(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv16i32(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv4i16_nxv16i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv16i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i16_nxv16i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.nxv4i16.nxv16i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv2i16(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv2i16(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv4i16_nxv2i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv2i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i16_nxv2i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv2i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i32(i16*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32(,,,,,,, i16*, , , i32) @@ -15747,11 +2375,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -15759,177 +2386,15 @@ define @test_vloxseg7_mask_nxv4i16_nxv4i32(i16* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv16i16(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv16i16(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv4i16_nxv16i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv16i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define 
@test_vloxseg8_mask_nxv4i16_nxv16i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv16i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv1i8(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv1i8(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv4i16_nxv1i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv1i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i16_nxv1i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv1i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv16i8(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv16i8(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv4i16_nxv16i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv16i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i16_nxv16i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu 
-; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv16i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv2i32(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv2i32(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv4i16_nxv2i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv2i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i16_nxv2i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv2i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i16(i16*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16(,,,,,,,, i16*, , , i32) @@ -15946,131 +2411,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv32i16(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv32i16(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv4i16_nxv32i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 
-; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv32i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i16_nxv32i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv32i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv1i32(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv1i32(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv4i16_nxv1i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv1i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i16_nxv1i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv1i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv8i16(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv8i16(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv4i16_nxv8i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv8i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i16_nxv8i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: 
vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -16079,137 +2423,15 @@ define @test_vloxseg8_mask_nxv4i16_nxv8i16(i16* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv8i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv8i8(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv8i8(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv4i16_nxv8i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv8i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv4i16_nxv8i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv8i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv8i32(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv8i32(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv4i16_nxv8i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv8i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i16_nxv8i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv8i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv8i32( %1, %1, %1, %1, %1, %1, 
%1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv64i8(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv64i8(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv4i16_nxv64i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv64i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i16_nxv64i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv64i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i8(i16*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8(,,,,,,,, i16*, , , i32) @@ -16226,230 +2448,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv1i16(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv1i16(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv4i16_nxv1i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; 
CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv1i16(i16* %base, %index, i32 %vl)
+  %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
   %1 = extractvalue {,,,,,,,} %0, 1
   ret %1
 }

-define @test_vloxseg8_mask_nxv4i16_nxv1i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v11, v9
-; CHECK-NEXT: vmv1r.v v12, v9
-; CHECK-NEXT: vmv1r.v v13, v9
-; CHECK-NEXT: vmv1r.v v14, v9
-; CHECK-NEXT: vmv1r.v v15, v9
-; CHECK-NEXT: vmv1r.v v16, v9
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v10
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv1i16(i16* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,,} %0, 0
-  %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv32i8(i16*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv32i8(,,,,,,,, i16*, , , i32)
-
-define @test_vloxseg8_nxv4i16_nxv32i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv32i8(i16* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg8_mask_nxv4i16_nxv32i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv32i8(i16* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,,} %0, 0
-  %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv2i8(i16*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv2i8(,,,,,,,, i16*, , , i32)
-
-define @test_vloxseg8_nxv4i16_nxv2i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv2i8(i16* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg8_mask_nxv4i16_nxv2i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v11, v9
-; CHECK-NEXT: vmv1r.v v12, v9
-; CHECK-NEXT: vmv1r.v v13, v9
-; CHECK-NEXT: vmv1r.v v14, v9
-; CHECK-NEXT: vmv1r.v v15, v9
-; CHECK-NEXT: vmv1r.v v16, v9
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v10
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv2i8(i16* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,,} %0, 0
-  %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv16i32(i16*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv16i32(,,,,,,,, i16*, , , i32)
-
-define @test_vloxseg8_nxv4i16_nxv16i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv16i32(i16* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg8_mask_nxv4i16_nxv16i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v17
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv16i32(i16* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,,} %0, 0
-  %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv2i16(i16*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv2i16(,,,,,,,, i16*, , , i32)
-
-define @test_vloxseg8_nxv4i16_nxv2i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv2i16(i16* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg8_mask_nxv4i16_nxv2i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v11, v9
-; CHECK-NEXT: vmv1r.v v12, v9
-; CHECK-NEXT: vmv1r.v v13, v9
-; CHECK-NEXT: vmv1r.v v14, v9
-; CHECK-NEXT: vmv1r.v v15, v9
-; CHECK-NEXT: vmv1r.v v16, v9
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v10
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv2i16(i16* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,,} %0, 0
-  %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,,,} %2, 1
-  ret %3
-}
-
 declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i32(i16*, , i32)
 declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32(,,,,,,,, i16*, , , i32)

@@ -16466,62 +2485,25 @@ entry:
   ret %1
 }

-define @test_vloxseg8_mask_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg8_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v19, v12
 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
-; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,,} %0, 0
-  %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,,,} %2, 1
-  ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv16i16(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv16i16(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv1i32_nxv16i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv16i16(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg2_mask_nxv1i32_nxv16i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv16i16(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 0
-  %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv16i16( %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,} %2, 1
-  ret %3
+  %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+  %1 = extractvalue {,,,,,,,} %0, 1
+  ret %1
 }

 declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i8(i32*, , i32)
 declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8(,, i32*, , , i32)

@@ -16540,160 +2522,20 @@ entry:
   ret %1
 }

-define @test_vloxseg2_mask_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg2_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v7, v8
 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 0
-  %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8( %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,} %2, 1
-  ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv16i8(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv16i8(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv1i32_nxv16i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv16i8(i32* %base, %index, i32 %vl)
+  %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8( %val, %val, i32* %base, %index, %mask, i32 %vl)
   %1 = extractvalue {,} %0, 1
   ret %1
 }

-define @test_vloxseg2_mask_nxv1i32_nxv16i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv16i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 0
-  %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv16i8( %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,} %2, 1
-  ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv2i32(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv2i32(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv1i32_nxv2i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv2i32(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg2_mask_nxv1i32_nxv2i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv2i32(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 0
-  %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv2i32( %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,} %2, 1
-  ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv4i16(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv4i16(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv1i32_nxv4i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv4i16(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg2_mask_nxv1i32_nxv4i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv4i16(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 0
-  %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv4i16( %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,} %2, 1
-  ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv32i16(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv32i16(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv1i32_nxv32i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv32i16(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg2_mask_nxv1i32_nxv32i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv32i16(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 0
-  %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv32i16( %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,} %2, 1
-  ret %3
-}
-
 declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i32(i32*, , i32)
 declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32(,, i32*, , , i32)

@@ -16710,194 +2552,20 @@ entry:
   ret %1
 }

-define @test_vloxseg2_mask_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg2_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v7, v8
 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 0
-  %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32( %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,} %2, 1
-  ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv8i16(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv8i16(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv1i32_nxv8i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv8i16(i32* %base, %index, i32 %vl)
+  %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32( %val, %val, i32* %base, %index, %mask, i32 %vl)
   %1 = extractvalue {,} %0, 1
   ret %1
 }

-define @test_vloxseg2_mask_nxv1i32_nxv8i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv8i16(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 0
-  %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv8i16( %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,} %2, 1
-  ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv8i8(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv8i8(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv1i32_nxv8i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv8i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg2_mask_nxv1i32_nxv8i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv8i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 0
-  %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv8i8( %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,} %2, 1
-  ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv8i32(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv8i32(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv1i32_nxv8i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv8i32(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg2_mask_nxv1i32_nxv8i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv8i32(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 0
-  %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv8i32( %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,} %2, 1
-  ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv64i8(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv64i8(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv1i32_nxv64i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv64i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg2_mask_nxv1i32_nxv64i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv64i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 0
-  %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv64i8( %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,} %2, 1
-  ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv4i8(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv4i8(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv1i32_nxv4i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv4i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg2_mask_nxv1i32_nxv4i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv4i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 0
-  %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv4i8( %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,} %2, 1
-  ret %3
-}
-
 declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i16(i32*, , i32)
 declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16(,, i32*, , , i32)

@@ -16914,229 +2582,20 @@ entry:
   ret %1
 }

-define @test_vloxseg2_mask_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg2_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v7, v8
 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 0
-  %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16( %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,} %2, 1
-  ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv32i8(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv32i8(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv1i32_nxv32i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv32i8(i32* %base, %index, i32 %vl)
+  %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16( %val, %val, i32* %base, %index, %mask, i32 %vl)
   %1 = extractvalue {,} %0, 1
   ret %1
 }

-define @test_vloxseg2_mask_nxv1i32_nxv32i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv32i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 0
-  %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv32i8( %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,} %2, 1
-  ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv2i8(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv2i8(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv1i32_nxv2i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv2i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg2_mask_nxv1i32_nxv2i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv2i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 0
-  %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv2i8( %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,} %2, 1
-  ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv16i32(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv16i32(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv1i32_nxv16i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv16i32(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg2_mask_nxv1i32_nxv16i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv16i32(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 0
-  %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv16i32( %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,} %2, 1
-  ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv2i16(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv2i16(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv1i32_nxv2i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv2i16(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg2_mask_nxv1i32_nxv2i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv2i16(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 0
-  %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv2i16( %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,} %2, 1
-  ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i32.nxv4i32(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv4i32(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv1i32_nxv4i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv4i32(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg2_mask_nxv1i32_nxv4i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i32.nxv4i32(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 0
-  %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv4i32( %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,} %2, 1
-  ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv16i16(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv16i16(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv1i32_nxv16i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv16i16(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg3_mask_nxv1i32_nxv16i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv16i16(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 0
-  %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv16i16( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,} %2, 1
-  ret %3
-}
-
 declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i8(i32*, , i32)
 declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8(,,, i32*, , , i32)

@@ -17153,165 +2612,22 @@ entry:
   ret %1
 }

-define @test_vloxseg3_mask_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg3_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 0
-  %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,} %2, 1
-  ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv16i8(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv16i8(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv1i32_nxv16i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv16i8(i32* %base, %index, i32 %vl)
+  %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
   %1 = extractvalue {,,} %0, 1
   ret %1
 }

-define @test_vloxseg3_mask_nxv1i32_nxv16i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv16i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 0
-  %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv16i8( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,} %2, 1
-  ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv2i32(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv2i32(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv1i32_nxv2i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv2i32(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg3_mask_nxv1i32_nxv2i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv2i32(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 0
-  %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv2i32( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,} %2, 1
-  ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv4i16(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv4i16(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv1i32_nxv4i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv4i16(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg3_mask_nxv1i32_nxv4i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv4i16(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 0
-  %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv4i16( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,} %2, 1
-  ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv32i16(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv32i16(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv1i32_nxv32i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv32i16(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg3_mask_nxv1i32_nxv32i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv32i16(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 0
-  %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv32i16( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,} %2, 1
-  ret %3
-}
-
 declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i32(i32*, , i32)
 declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32(,,, i32*, , , i32)

@@ -17328,200 +2644,22 @@ entry:
   ret %1
 }

-define @test_vloxseg3_mask_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg3_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 0
-  %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,} %2, 1
-  ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv8i16(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv8i16(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv1i32_nxv8i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv8i16(i32* %base, %index, i32 %vl)
+  %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
   %1 = extractvalue {,,} %0, 1
   ret %1
 }

-define @test_vloxseg3_mask_nxv1i32_nxv8i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv8i16(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 0
-  %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv8i16( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,} %2, 1
-  ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv8i8(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv8i8(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv1i32_nxv8i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv8i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg3_mask_nxv1i32_nxv8i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv8i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 0
-  %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv8i8( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,} %2, 1
-  ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv8i32(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv8i32(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv1i32_nxv8i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv8i32(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg3_mask_nxv1i32_nxv8i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv8i32(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 0
-  %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv8i32( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,} %2, 1
-  ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv64i8(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv64i8(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv1i32_nxv64i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv64i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg3_mask_nxv1i32_nxv64i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv64i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 0
-  %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv64i8( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,} %2, 1
-  ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv4i8(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv4i8(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv1i32_nxv4i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv4i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg3_mask_nxv1i32_nxv4i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv4i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 0
-  %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv4i8( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,} %2, 1
-  ret %3
-}
-
 declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i16(i32*, , i32)
 declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16(,,, i32*, , , i32)

@@ -17538,236 +2676,22 @@ entry:
   ret %1
 }

-define @test_vloxseg3_mask_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg3_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 0
-  %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,} %2, 1
-  ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv32i8(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv32i8(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv1i32_nxv32i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv32i8(i32* %base, %index, i32 %vl)
+  %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
   %1 = extractvalue {,,} %0, 1
   ret %1
 }

-define @test_vloxseg3_mask_nxv1i32_nxv32i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv32i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 0
-  %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv32i8( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,} %2, 1
-  ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv2i8(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv2i8(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv1i32_nxv2i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv2i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg3_mask_nxv1i32_nxv2i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv2i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 0
-  %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv2i8( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,} %2, 1
-  ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv16i32(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv16i32(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv1i32_nxv16i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv16i32(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg3_mask_nxv1i32_nxv16i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv16i32(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 0
-  %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv16i32( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,} %2, 1
-  ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv2i16(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv2i16(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv1i32_nxv2i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv2i16(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg3_mask_nxv1i32_nxv2i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv2i16(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 0
-  %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv2i16( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,} %2, 1
-  ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv4i32(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv4i32(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv1i32_nxv4i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv4i32(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg3_mask_nxv1i32_nxv4i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i32.nxv4i32(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,} %0, 0
-  %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv4i32( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,} %2, 1
-  ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv16i16(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv16i16(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv1i32_nxv16i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv16i16(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg4_mask_nxv1i32_nxv16i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv16i16(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,} %0, 0
-  %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv16i16( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,} %2, 1
-  ret %3
-}
-
 declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i8(i32*, , i32)
 declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8(,,,, i32*, , , i32)

@@ -17784,170 +2708,23 @@ entry:
   ret %1
 }

-define @test_vloxseg4_mask_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg4_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,} %0, 0
-  %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,} %2, 1
-  ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv16i8(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv16i8(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv1i32_nxv16i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv16i8(i32* %base, %index, i32 %vl)
+  %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
   %1 = extractvalue {,,,} %0, 1
   ret %1
 }

-define @test_vloxseg4_mask_nxv1i32_nxv16i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv16i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,} %0, 0
-  %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv16i8( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,} %2, 1
-  ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv2i32(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv2i32(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv1i32_nxv2i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv2i32(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg4_mask_nxv1i32_nxv2i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv2i32(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,} %0, 0
-  %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv2i32( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,} %2, 1
-  ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv4i16(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv4i16(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv1i32_nxv4i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv4i16(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg4_mask_nxv1i32_nxv4i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv4i16(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,} %0, 0
-  %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv4i16( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,} %2, 1
-  ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv32i16(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv32i16(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv1i32_nxv32i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv32i16(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg4_mask_nxv1i32_nxv32i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv32i16(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,} %0, 0
-  %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv32i16( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,} %2, 1
-  ret %3
-}
-
 declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i32(i32*, , i32)
 declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32(,,,, i32*, , , i32)

@@ -17964,206 +2741,23 @@ entry:
   ret %1
 }

-define @test_vloxseg4_mask_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg4_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,} %0, 0
-  %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,} %2, 1
-  ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv8i16(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv8i16(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv1i32_nxv8i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv8i16(i32* %base, %index, i32 %vl)
+  %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
   %1 = extractvalue {,,,} %0, 1
   ret %1
 }

-define @test_vloxseg4_mask_nxv1i32_nxv8i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv8i16(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,} %0, 0
-  %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv8i16( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,} %2, 1
-  ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv8i8(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv8i8(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv1i32_nxv8i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv8i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg4_mask_nxv1i32_nxv8i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv8i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,} %0, 0
-  %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv8i8( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,} %2, 1
-  ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv8i32(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv8i32(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv1i32_nxv8i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv8i32(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg4_mask_nxv1i32_nxv8i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv8i32(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,} %0, 0
-  %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv8i32( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,} %2, 1
-  ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv64i8(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv64i8(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv1i32_nxv64i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv64i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg4_mask_nxv1i32_nxv64i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv64i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,} %0, 0
-  %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv64i8( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,} %2, 1
-  ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv4i8(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv4i8(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv1i32_nxv4i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv4i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg4_mask_nxv1i32_nxv4i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv4i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,} %0, 0
-  %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv4i8( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,} %2, 1
-  ret %3
-}
-
 declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i16(i32*, , i32)
 declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16(,,,, i32*, , , i32)

@@ -18180,243 +2774,23 @@ entry:
   ret %1
 }

-define @test_vloxseg4_mask_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg4_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,} %0, 0
-  %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,} %2, 1
-  ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv32i8(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv32i8(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv1i32_nxv32i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv32i8(i32* %base, %index, i32 %vl)
+  %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
   %1 = extractvalue {,,,} %0, 1
   ret %1
 }

-define @test_vloxseg4_mask_nxv1i32_nxv32i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv32i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,} %0, 0
-  %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv32i8( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,} %2, 1
-  ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv2i8(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv2i8(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv1i32_nxv2i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv2i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg4_mask_nxv1i32_nxv2i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv2i8(i32* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,} %0, 0
-  %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv2i8( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,} %2, 1
-  ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv16i32(i32*, , i32)
-declare {,,,}
@llvm.riscv.vloxseg4.mask.nxv1i32.nxv16i32(,,,, i32*, , , i32) - -define @test_vloxseg4_nxv1i32_nxv16i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv16i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i32_nxv16i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv16i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv16i32( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv2i16(i32*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv2i16(,,,, i32*, , , i32) - -define @test_vloxseg4_nxv1i32_nxv2i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv2i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i32_nxv2i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv2i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv2i16( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv4i32(i32*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv4i32(,,,, i32*, , , i32) - -define @test_vloxseg4_nxv1i32_nxv4i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv4i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i32_nxv4i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, 
a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i32.nxv4i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i32.nxv4i32( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv16i16(i32*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv16i16(,,,,, i32*, , , i32) - -define @test_vloxseg5_nxv1i32_nxv16i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv16i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i32_nxv16i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv16i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv16i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i8(i32*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8(,,,,, i32*, , , i32) @@ -18433,175 +2807,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl, %mask) { +define @test_vloxseg5_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv16i8(i32*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv16i8(,,,,, i32*, , , i32) - -define @test_vloxseg5_nxv1i32_nxv16i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv16i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8( %val, 
%val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1i32_nxv16i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv16i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv16i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv2i32(i32*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv2i32(,,,,, i32*, , , i32) - -define @test_vloxseg5_nxv1i32_nxv2i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv2i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i32_nxv2i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv2i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv2i32( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv4i16(i32*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv4i16(,,,,, i32*, , , i32) - -define @test_vloxseg5_nxv1i32_nxv4i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv4i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i32_nxv4i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv4i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} 
@llvm.riscv.vloxseg5.mask.nxv1i32.nxv4i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv32i16(i32*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv32i16(,,,,, i32*, , , i32) - -define @test_vloxseg5_nxv1i32_nxv32i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv32i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i32_nxv32i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv32i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv32i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i32(i32*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32(,,,,, i32*, , , i32) @@ -18618,212 +2841,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl, %mask) { +define @test_vloxseg5_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv8i16(i32*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv8i16(,,,,, i32*, , , i32) - -define @test_vloxseg5_nxv1i32_nxv8i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv8i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1i32_nxv8i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv8i16: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv8i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv8i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv8i8(i32*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv8i8(,,,,, i32*, , , i32) - -define @test_vloxseg5_nxv1i32_nxv8i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv8i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i32_nxv8i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv8i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv8i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv8i32(i32*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv8i32(,,,,, i32*, , , i32) - -define @test_vloxseg5_nxv1i32_nxv8i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv8i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i32_nxv8i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv8i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv8i32( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv64i8(i32*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv64i8(,,,,, 
i32*, , , i32) - -define @test_vloxseg5_nxv1i32_nxv64i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv64i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i32_nxv64i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv64i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv64i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv4i8(i32*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv4i8(,,,,, i32*, , , i32) - -define @test_vloxseg5_nxv1i32_nxv4i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv4i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i32_nxv4i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv4i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv4i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv1i16(i32*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16(,,,,, i32*, , , i32) @@ -18840,250 +2875,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl, %mask) { +define @test_vloxseg5_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} 
@llvm.riscv.vloxseg5.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv32i8(i32*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv32i8(,,,,, i32*, , , i32) - -define @test_vloxseg5_nxv1i32_nxv32i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv32i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1i32_nxv32i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv32i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv32i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv2i8(i32*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv2i8(,,,,, i32*, , , i32) - -define @test_vloxseg5_nxv1i32_nxv2i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv2i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i32_nxv2i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv2i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv2i8( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv16i32(i32*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv16i32(,,,,, i32*, , , i32) - -define @test_vloxseg5_nxv1i32_nxv16i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: 
vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv16i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i32_nxv16i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv16i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv16i32( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv2i16(i32*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv2i16(,,,,, i32*, , , i32) - -define @test_vloxseg5_nxv1i32_nxv2i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv2i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i32_nxv2i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv2i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv2i16( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv4i32(i32*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv4i32(,,,,, i32*, , , i32) - -define @test_vloxseg5_nxv1i32_nxv4i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv4i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i32_nxv4i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i32.nxv4i32(i32* %base, 
%index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i32.nxv4i32( %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv16i16(i32*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv16i16(,,,,,, i32*, , , i32) - -define @test_vloxseg6_nxv1i32_nxv16i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv16i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i32_nxv16i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv16i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv16i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i8(i32*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8(,,,,,, i32*, , , i32) @@ -19100,180 +2909,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv16i8(i32*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv16i8(,,,,,, i32*, , , i32) - -define @test_vloxseg6_nxv1i32_nxv16i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv16i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 
} -define @test_vloxseg6_mask_nxv1i32_nxv16i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv16i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv16i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv2i32(i32*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv2i32(,,,,,, i32*, , , i32) - -define @test_vloxseg6_nxv1i32_nxv2i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv2i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i32_nxv2i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv2i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv2i32( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv4i16(i32*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv4i16(,,,,,, i32*, , , i32) - -define @test_vloxseg6_nxv1i32_nxv4i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv4i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i32_nxv4i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv4i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call 
{,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv4i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv32i16(i32*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv32i16(,,,,,, i32*, , , i32) - -define @test_vloxseg6_nxv1i32_nxv32i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv32i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i32_nxv32i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv32i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv32i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i32(i32*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32(,,,,,, i32*, , , i32) @@ -19290,218 +2944,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv8i16(i32*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv8i16(,,,,,, i32*, , , i32) - -define @test_vloxseg6_nxv1i32_nxv8i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv8i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define 
@test_vloxseg6_mask_nxv1i32_nxv8i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv8i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv8i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv8i8(i32*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv8i8(,,,,,, i32*, , , i32) - -define @test_vloxseg6_nxv1i32_nxv8i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv8i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i32_nxv8i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv8i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv8i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv8i32(i32*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv8i32(,,,,,, i32*, , , i32) - -define @test_vloxseg6_nxv1i32_nxv8i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv8i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i32_nxv8i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv8i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv1i32.nxv8i32( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv64i8(i32*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv64i8(,,,,,, i32*, , , i32) - -define @test_vloxseg6_nxv1i32_nxv64i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv64i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i32_nxv64i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv64i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv64i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv4i8(i32*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv4i8(,,,,,, i32*, , , i32) - -define @test_vloxseg6_nxv1i32_nxv4i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv4i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i32_nxv4i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv4i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv4i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i16(i32*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16(,,,,,, i32*, , , i32) @@ -19518,257 +2979,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; 
CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv32i8(i32*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv32i8(,,,,,, i32*, , , i32) - -define @test_vloxseg6_nxv1i32_nxv32i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv32i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1i32_nxv32i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv32i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv32i8( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv2i8(i32*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv2i8(,,,,,, i32*, , , i32) - -define @test_vloxseg6_nxv1i32_nxv2i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv2i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i32_nxv2i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv2i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv2i8( %1, %1, %1, %1, %1, %1, i32* %base, 
%index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv16i32(i32*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv16i32(,,,,,, i32*, , , i32) - -define @test_vloxseg6_nxv1i32_nxv16i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv16i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i32_nxv16i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv16i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv16i32( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv2i16(i32*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv2i16(,,,,,, i32*, , , i32) - -define @test_vloxseg6_nxv1i32_nxv2i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv2i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i32_nxv2i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv2i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv2i16( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv4i32(i32*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv4i32(,,,,,, i32*, , , i32) - -define @test_vloxseg6_nxv1i32_nxv4i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv4i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define 
@test_vloxseg6_mask_nxv1i32_nxv4i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i32.nxv4i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i32.nxv4i32( %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv16i16(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv16i16(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv1i32_nxv16i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv16i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i32_nxv16i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv16i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv16i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i8(i32*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8(,,,,,,, i32*, , , i32) @@ -19785,11 +3014,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -19797,173 +3025,15 @@ define @test_vloxseg7_mask_nxv1i32_nxv1i8(i32* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv16i8(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv16i8(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv1i32_nxv16i8(i32* %base, %index, i32 %vl) 
{ -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv16i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1i32_nxv16i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv16i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv16i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv2i32(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv2i32(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv1i32_nxv2i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv2i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i32_nxv2i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv2i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv4i16(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv4i16(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv1i32_nxv4i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv4i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i32_nxv4i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg7_mask_nxv1i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv4i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv32i16(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv32i16(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv1i32_nxv32i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv32i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i32_nxv32i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv32i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv32i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i32(i32*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32(,,,,,,, i32*, , , i32) @@ -19980,11 +3050,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -19992,212 +3061,15 @@ define @test_vloxseg7_mask_nxv1i32_nxv1i32(i32* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv8i16(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv8i16(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv1i32_nxv8i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: 
test_vloxseg7_nxv1i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv8i16(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1i32_nxv8i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv8i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv8i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv8i8(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv8i8(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv1i32_nxv8i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv8i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i32_nxv8i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv8i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv8i32(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv8i32(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv1i32_nxv8i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv8i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i32_nxv8i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv8i32: 
-; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv8i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv8i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv64i8(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv64i8(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv1i32_nxv64i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv64i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i32_nxv64i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv64i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv64i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv4i8(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv4i8(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv1i32_nxv4i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv4i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i32_nxv4i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv4i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv1i32.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i16(i32*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16(,,,,,,, i32*, , , i32) @@ -20214,11 +3086,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -20226,252 +3097,15 @@ define @test_vloxseg7_mask_nxv1i32_nxv1i16(i32* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv32i8(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv32i8(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv1i32_nxv32i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv32i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1i32_nxv32i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv32i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv32i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv2i8(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv2i8(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv1i32_nxv2i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv2i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i32_nxv2i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, 
e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv2i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv16i32(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv16i32(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv1i32_nxv16i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv16i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i32_nxv16i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv16i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv16i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv2i16(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv2i16(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv1i32_nxv2i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv2i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i32_nxv2i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv2i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv2i16( %1, %1, %1, %1, 
%1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv4i32(i32*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv4i32(,,,,,,, i32*, , , i32) - -define @test_vloxseg7_nxv1i32_nxv4i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv4i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i32_nxv4i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i32.nxv4i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i32.nxv4i32( %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv16i16(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv16i16(,,,,,,,, i32*, , , i32) - -define @test_vloxseg8_nxv1i32_nxv16i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv16i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i32_nxv16i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv16i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i8(i32*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8(,,,,,,,, i32*, , , i32) @@ -20488,51 +3122,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv16i8(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv16i8(,,,,,,,, i32*, , , i32) - -define @test_vloxseg8_nxv1i32_nxv16i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv16i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i32_nxv16i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -20541,137 +3134,15 @@ define @test_vloxseg8_mask_nxv1i32_nxv16i8(i32* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv16i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv2i32(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv2i32(,,,,,,,, i32*, , , i32) - -define @test_vloxseg8_nxv1i32_nxv2i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv2i32(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv1i32_nxv2i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv2i32(i32* %base, %index, i32 
%vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv4i16(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv4i16(,,,,,,,, i32*, , , i32) - -define @test_vloxseg8_nxv1i32_nxv4i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv4i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i32_nxv4i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv4i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv32i16(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv32i16(,,,,,,,, i32*, , , i32) - -define @test_vloxseg8_nxv1i32_nxv32i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv32i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i32_nxv32i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv32i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i32(i32*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32(,,,,,,,, i32*, , , i32) @@ -20688,51 +3159,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1i32_nxv1i32(i32* %base, %index, 
i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv8i16(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv8i16(,,,,,,,, i32*, , , i32) - -define @test_vloxseg8_nxv1i32_nxv8i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv8i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i32_nxv8i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -20741,177 +3171,15 @@ define @test_vloxseg8_mask_nxv1i32_nxv8i16(i32* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv8i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv8i8(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv8i8(,,,,,,,, i32*, , , i32) - -define @test_vloxseg8_nxv1i32_nxv8i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv8i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv1i32_nxv8i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; 
CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv8i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv8i32(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv8i32(,,,,,,,, i32*, , , i32) - -define @test_vloxseg8_nxv1i32_nxv8i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv8i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i32_nxv8i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv8i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv64i8(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv64i8(,,,,,,,, i32*, , , i32) - -define @test_vloxseg8_nxv1i32_nxv64i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv64i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i32_nxv64i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv64i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv4i8(i32*, , i32) 
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv4i8(,,,,,,,, i32*, , , i32) - -define @test_vloxseg8_nxv1i32_nxv4i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv4i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i32_nxv4i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv4i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i16(i32*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16(,,,,,,,, i32*, , , i32) @@ -20928,211 +3196,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv1i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv32i8(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv32i8(,,,,,,,, i32*, , , i32) - -define @test_vloxseg8_nxv1i32_nxv32i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv32i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i32_nxv32i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v 
v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv32i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv2i8(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv2i8(,,,,,,,, i32*, , , i32) - -define @test_vloxseg8_nxv1i32_nxv2i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv2i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i32_nxv2i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv2i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv16i32(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv16i32(,,,,,,,, i32*, , , i32) - -define @test_vloxseg8_nxv1i32_nxv16i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv16i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i32_nxv16i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv16i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} 
@llvm.riscv.vloxseg8.mask.nxv1i32.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv2i16(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv2i16(,,,,,,,, i32*, , , i32) - -define @test_vloxseg8_nxv1i32_nxv2i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv2i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i32_nxv2i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv2i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv4i32(i32*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv4i32(,,,,,,,, i32*, , , i32) - -define @test_vloxseg8_nxv1i32_nxv4i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv4i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i32_nxv4i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -21141,253 +3208,13 @@ define @test_vloxseg8_mask_nxv1i32_nxv4i32(i32* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv1i32.nxv4i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv16i16(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv16i16(,, i16*, , , i32) - -define @test_vloxseg2_nxv8i16_nxv16i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv16i16(i16* %base, %index, i32 %vl) - %1 
= extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv8i16_nxv16i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv16i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv16i16( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv1i8(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv1i8(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv8i16_nxv1i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv1i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv8i16_nxv1i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv1i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv1i8( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv16i8(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv16i8(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv8i16_nxv16i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv16i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv8i16_nxv16i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv16i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv16i8( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv2i32(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv2i32(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv8i16_nxv2i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv2i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv8i16_nxv2i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv2i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv2i32( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv4i16(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv4i16(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv8i16_nxv4i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv4i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv8i16_nxv4i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv4i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv4i16( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv32i16(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv32i16(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv8i16_nxv32i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv32i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv8i16_nxv32i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv32i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv32i16( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv1i32(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv1i32(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv8i16_nxv1i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv1i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv8i16_nxv1i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv1i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv1i32( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
+ %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 1
+ ret %1
 }
 declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i16(i16*, , i32)
 declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16(,, i16*, , , i32)
@@ -21406,22 +3233,18 @@ entry:
 ret %1
 }
-define @test_vloxseg2_mask_nxv8i16_nxv8i16(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg2_mask_nxv8i16_nxv8i16( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v8
 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
+ %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16( %val, %val, i16* %base, %index, %mask, i32 %vl)
+ %1 = extractvalue {,} %0, 1
+ ret %1
 }
 declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i8(i16*, , i32)
@@ -21440,22 +3263,18 @@ entry:
 ret %1
 }
-define @test_vloxseg2_mask_nxv8i16_nxv8i8(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg2_mask_nxv8i16_nxv8i8( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v8
 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
+ %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8( %val, %val, i16* %base, %index, %mask, i32 %vl)
+ %1 = extractvalue {,} %0, 1
+ ret %1
 }
 declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i32(i16*, , i32)
@@ -21474,541 +3293,20 @@ entry:
 ret %1
 }
-define @test_vloxseg2_mask_nxv8i16_nxv8i32(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg2_mask_nxv8i16_nxv8i32( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v8
 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v12, v0.t
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv64i8(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv64i8(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv8i16_nxv64i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv64i8(i16* %base, %index, i32 %vl)
+ %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32( %val, %val, i16* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,} %0, 1
 ret %1
 }
-define @test_vloxseg2_mask_nxv8i16_nxv64i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv64i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv64i8( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv4i8(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv4i8(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv8i16_nxv4i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv4i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv8i16_nxv4i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv4i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv4i8( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv1i16(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv1i16(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv8i16_nxv1i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv1i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv8i16_nxv1i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv1i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv1i16( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv32i8(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv32i8(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv8i16_nxv32i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv32i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv8i16_nxv32i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv32i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv32i8( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv2i8(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv2i8(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv8i16_nxv2i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv2i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv8i16_nxv2i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv2i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv2i8( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv16i32(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv16i32(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv8i16_nxv16i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv16i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv8i16_nxv16i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv16i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv16i32( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv2i16(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv2i16(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv8i16_nxv2i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv2i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv8i16_nxv2i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv2i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv2i16( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv8i16.nxv4i32(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv4i32(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv8i16_nxv4i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv4i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv8i16_nxv4i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv8i16_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv4i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv4i32( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv16i16(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv16i16(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv8i16_nxv16i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv16i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv8i16_nxv16i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv16i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv16i16( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv1i8(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv1i8(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv8i16_nxv1i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv1i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv8i16_nxv1i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv1i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv1i8( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv16i8(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv16i8(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv8i16_nxv16i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv16i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv8i16_nxv16i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv16i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv16i8( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv2i32(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv2i32(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv8i16_nxv2i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv2i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv8i16_nxv2i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv2i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv2i32( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv4i16(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv4i16(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv8i16_nxv4i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv4i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv8i16_nxv4i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv4i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv4i16( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv32i16(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv32i16(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv8i16_nxv32i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv32i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv8i16_nxv32i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv32i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv32i16( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv1i32(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv1i32(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv8i16_nxv1i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv1i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv8i16_nxv1i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv1i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv1i32( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
 declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i16(i16*, , i32)
 declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16(,,, i16*, , , i32)
@@ -22025,23 +3323,20 @@ entry:
 ret %1
 }
-define @test_vloxseg3_mask_nxv8i16_nxv8i16(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg3_mask_nxv8i16_nxv8i16( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8
+; CHECK-NEXT: vmv2r.v v2, v8
 ; CHECK-NEXT: vmv2r.v v4, v2
 ; CHECK-NEXT: vmv2r.v v6, v2
 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t
 ; CHECK-NEXT: vmv2r.v v8, v4
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
+ %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+ %1 = extractvalue {,,} %0, 1
+ ret %1
 }
 declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i8(i16*, , i32)
@@ -22060,23 +3355,20 @@ entry:
 ret %1
 }
-define @test_vloxseg3_mask_nxv8i16_nxv8i8(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg3_mask_nxv8i16_nxv8i8( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8
+; CHECK-NEXT: vmv2r.v v2, v8
 ; CHECK-NEXT: vmv2r.v v4, v2
 ; CHECK-NEXT: vmv2r.v v6, v2
 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t
 ; CHECK-NEXT: vmv2r.v v8, v4
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
+ %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+ %1 = extractvalue {,,} %0, 1
+ ret %1
 }
 declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i32(i16*, , i32)
@@ -22095,557 +3387,21 @@ entry:
 ret %1
 }
-define @test_vloxseg3_mask_nxv8i16_nxv8i32(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg3_mask_nxv8i16_nxv8i32( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v6
 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv64i8(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv64i8(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv8i16_nxv64i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv64i8(i16* %base, %index, i32 %vl)
+ %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,} %0, 1
 ret %1
 }
-define @test_vloxseg3_mask_nxv8i16_nxv64i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv64i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv64i8( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv4i8(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv4i8(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv8i16_nxv4i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv4i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv8i16_nxv4i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv4i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv4i8( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv1i16(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv1i16(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv8i16_nxv1i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv1i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv8i16_nxv1i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv1i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv1i16( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv32i8(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv32i8(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv8i16_nxv32i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv32i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv8i16_nxv32i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv32i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv32i8( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv2i8(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv2i8(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv8i16_nxv2i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv2i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv8i16_nxv2i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv2i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv2i8( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv16i32(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv16i32(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv8i16_nxv16i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv16i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv8i16_nxv16i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv16i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv16i32( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv2i16(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv2i16(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv8i16_nxv2i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv2i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv8i16_nxv2i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv2i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv2i16( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv4i32(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv4i32(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv8i16_nxv4i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv4i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv8i16_nxv4i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv4i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv4i32( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv16i16(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv16i16(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv8i16_nxv16i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv16i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv8i16_nxv16i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv16i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv16i16( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv1i8(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv1i8(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv8i16_nxv1i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv1i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv8i16_nxv1i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv1i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv1i8( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv16i8(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv16i8(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv8i16_nxv16i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv16i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv8i16_nxv16i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv16i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv16i8( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv2i32(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv2i32(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv8i16_nxv2i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv2i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv8i16_nxv2i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv2i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv2i32( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv4i16(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv4i16(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv8i16_nxv4i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv4i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv8i16_nxv4i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv4i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv4i16( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv32i16(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv32i16(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv8i16_nxv32i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv32i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv8i16_nxv32i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv32i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv32i16( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv1i32(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv1i32(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv8i16_nxv1i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv1i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv8i16_nxv1i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv1i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv1i32( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
 declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i16(i16*, , i32)
 declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16(,,,, i16*, , , i32)
@@ -22662,24 +3418,21 @@ entry:
 ret %1
 }
-define @test_vloxseg4_mask_nxv8i16_nxv8i16(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg4_mask_nxv8i16_nxv8i16( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
+ %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+ %1 = extractvalue {,,,} %0, 1
+ ret %1
 }
 declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i8(i16*, , i32)
@@ -22698,24 +3451,21 @@ entry:
 ret %1
 }
-define @test_vloxseg4_mask_nxv8i16_nxv8i8(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg4_mask_nxv8i16_nxv8i8( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
+ %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+ %1 = extractvalue {,,,} %0, 1
+ ret %1
 }
 declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i32(i16*, , i32)
@@ -22734,552 +3484,23 @@ entry:
 ret %1
 }
-define @test_vloxseg4_mask_nxv8i16_nxv8i32(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg4_mask_nxv8i16_nxv8i32( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv64i8(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv64i8(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv8i16_nxv64i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv64i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv8i16_nxv64i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v16, (a0), v8
+; CHECK-NEXT: vmv2r.v v16, v8
 ; CHECK-NEXT: vmv2r.v v18, v16
 ; CHECK-NEXT: vmv2r.v v20, v16
 ; CHECK-NEXT: vmv2r.v v22, v16
 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v16, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT: vmv2r.v v8, v18
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv64i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv64i8( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv4i8(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv4i8(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv8i16_nxv4i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv4i8(i16* %base, %index, i32 %vl)
+ %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,} %0, 1
 ret %1
 }
-define @test_vloxseg4_mask_nxv8i16_nxv4i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv4i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv4i8( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv1i16(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv1i16(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv8i16_nxv1i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv1i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv8i16_nxv1i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv1i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv1i16( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv32i8(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv32i8(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv8i16_nxv32i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv32i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv8i16_nxv32i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv32i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv32i8( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv2i8(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv2i8(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv8i16_nxv2i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv2i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv8i16_nxv2i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv2i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv2i8( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv16i32(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv16i32(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv8i16_nxv16i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv16i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv8i16_nxv16i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv16i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv16i32( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv2i16(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv2i16(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv8i16_nxv2i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv2i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv8i16_nxv2i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv2i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv2i16( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv4i32(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv4i32(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv8i16_nxv4i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv4i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv8i16_nxv4i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv4i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv4i32( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv16i16(i8*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv16i16(,, i8*, , , i32)
-
-define @test_vloxseg2_nxv8i8_nxv16i16(i8* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv16i16(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv8i8_nxv16i16(i8* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv16i16(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv16i16( %1, %1, i8* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv1i8(i8*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv1i8(,, i8*, , , i32)
-
-define @test_vloxseg2_nxv8i8_nxv1i8(i8* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv1i8(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv8i8_nxv1i8(i8* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv1i8(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv1i8( %1, %1, i8* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv16i8(i8*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv16i8(,, i8*, , , i32)
-
-define @test_vloxseg2_nxv8i8_nxv16i8(i8* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv16i8(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv8i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv16i8(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv16i8( %1, %1, i8* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv2i32(i8*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv2i32(,, i8*, , , i32)
-
-define @test_vloxseg2_nxv8i8_nxv2i32(i8* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv2i32(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv8i8_nxv2i32(i8* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv2i32(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv2i32( %1, %1, i8* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv4i16(i8*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv4i16(,, i8*, , , i32)
-
-define @test_vloxseg2_nxv8i8_nxv4i16(i8* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: 
vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv4i16( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv32i16(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv32i16(,, i8*, , , i32) - -define @test_vloxseg2_nxv8i8_nxv32i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i8_nxv32i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv32i16( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv1i32(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv1i32(,, i8*, , , i32) - -define @test_vloxseg2_nxv8i8_nxv1i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i8_nxv1i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv1i32( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i16(i8*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16(,, i8*, , , i32) @@ -23296,22 +3517,18 @@ entry: ret %1 } -define 
@test_vloxseg2_mask_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16( %val, %val, i8* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i8(i8*, , i32) @@ -23330,22 +3547,18 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8( %val, %val, i8* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i32(i8*, , i32) @@ -23364,541 +3577,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv64i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv64i8(,, i8*, , , i32) - -define @test_vloxseg2_nxv8i8_nxv64i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), 
v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv64i8(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32( %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv8i8_nxv64i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv64i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv4i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv4i8(,, i8*, , , i32) - -define @test_vloxseg2_nxv8i8_nxv4i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i8_nxv4i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv4i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv1i16(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv1i16(,, i8*, , , i32) - -define @test_vloxseg2_nxv8i8_nxv1i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv1i16( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv32i8(i8*, , i32) -declare {,} 
@llvm.riscv.vloxseg2.mask.nxv8i8.nxv32i8(,, i8*, , , i32) - -define @test_vloxseg2_nxv8i8_nxv32i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i8_nxv32i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv32i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv2i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv2i8(,, i8*, , , i32) - -define @test_vloxseg2_nxv8i8_nxv2i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv2i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv16i32(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv16i32(,, i8*, , , i32) - -define @test_vloxseg2_nxv8i8_nxv16i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv16i32( %1, %1, 
i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv2i16(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv2i16(,, i8*, , , i32) - -define @test_vloxseg2_nxv8i8_nxv2i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv2i16( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i8.nxv4i32(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv4i32(,, i8*, , , i32) - -define @test_vloxseg2_nxv8i8_nxv4i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv4i32( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv16i16(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv16i16(,,, i8*, , , i32) - -define @test_vloxseg3_nxv8i8_nxv16i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i8_nxv16i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: 
ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv16i16( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv1i8(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv1i8(,,, i8*, , , i32) - -define @test_vloxseg3_nxv8i8_nxv1i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i8_nxv1i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv1i8( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv16i8(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv16i8(,,, i8*, , , i32) - -define @test_vloxseg3_nxv8i8_nxv16i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv16i8( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv2i32(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv2i32(,,, i8*, , , i32) - -define @test_vloxseg3_nxv8i8_nxv2i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i8_nxv2i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv2i32( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv4i16(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv4i16(,,, i8*, , , i32) - -define @test_vloxseg3_nxv8i8_nxv4i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv4i16( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv32i16(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv32i16(,,, i8*, , , i32) - -define @test_vloxseg3_nxv8i8_nxv32i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i8_nxv32i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv32i16( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv1i32(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv1i32(,,, i8*, , , i32) - -define @test_vloxseg3_nxv8i8_nxv1i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret 
-entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i8_nxv1i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv1i32( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i16(i8*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16(,,, i8*, , , i32) @@ -23915,23 +3607,19 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i8(i8*, , i32) @@ -23950,23 +3638,20 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i32(i8*, , i32) @@ -23985,557 +3670,21 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, 
i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv64i8(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv64i8(,,, i8*, , , i32) - -define @test_vloxseg3_nxv8i8_nxv64i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv64i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv8i8_nxv64i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv64i8( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv4i8(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv4i8(,,, i8*, , , i32) - -define @test_vloxseg3_nxv8i8_nxv4i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i8_nxv4i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv4i8( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv1i16(i8*, , i32) -declare 
{,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv1i16(,,, i8*, , , i32) - -define @test_vloxseg3_nxv8i8_nxv1i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv1i16( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv32i8(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv32i8(,,, i8*, , , i32) - -define @test_vloxseg3_nxv8i8_nxv32i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i8_nxv32i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv32i8( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv2i8(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv2i8(,,, i8*, , , i32) - -define @test_vloxseg3_nxv8i8_nxv2i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv2i8(i8* %base, 
%index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv2i8( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv16i32(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv16i32(,,, i8*, , , i32) - -define @test_vloxseg3_nxv8i8_nxv16i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv16i32( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv2i16(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv2i16(,,, i8*, , , i32) - -define @test_vloxseg3_nxv8i8_nxv2i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv2i16( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv4i32(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv4i32(,,, i8*, , , i32) - -define @test_vloxseg3_nxv8i8_nxv4i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: 
vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv4i32( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv16i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv16i16(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv8i8_nxv16i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i8_nxv16i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv16i16( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv1i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv1i8(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv8i8_nxv1i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i8_nxv1i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv1i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv16i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv16i8(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv8i8_nxv16i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; 
CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv16i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv2i32(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv2i32(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv8i8_nxv2i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i8_nxv2i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv2i32( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv4i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv4i16(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv8i8_nxv4i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv4i16( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - 
ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv32i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv32i16(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv8i8_nxv32i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i8_nxv32i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv32i16( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv1i32(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv1i32(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv8i8_nxv1i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i8_nxv1i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv1i32( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i16(i8*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16(,,,, i8*, , , i32) @@ -24552,24 +3701,21 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i16(i8* 
%base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i8(i8*, , i32) @@ -24588,24 +3734,21 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,,} %0, 1 + ret %1 } declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i32(i8*, , i32) @@ -24624,573 +3767,22 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg4ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv64i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv64i8(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv8i8_nxv64i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv64i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv8i8_nxv64i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv64i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv4i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv4i8(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv8i8_nxv4i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i8_nxv4i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv4i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv1i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv1i16(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv8i8_nxv1i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv1i16( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv32i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv32i8(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv8i8_nxv32i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, 
e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i8_nxv32i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv32i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv2i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv2i8(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv8i8_nxv2i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv2i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv16i32(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv16i32(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv8i8_nxv16i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv16i32( 
%1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv2i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv2i16(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv8i8_nxv2i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv2i16( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv4i32(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv4i32(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv8i8_nxv4i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv4i32( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv16i16(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv16i16(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv8i8_nxv16i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv8i8_nxv16i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), 
v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv16i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv1i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv1i8(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv8i8_nxv1i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv8i8_nxv1i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv1i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv16i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv16i8(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv8i8_nxv16i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv8i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv16i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv2i32(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv2i32(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv8i8_nxv2i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv2i32: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv8i8_nxv2i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv2i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv4i16(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv4i16(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv8i8_nxv4i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv8i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv4i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv32i16(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv32i16(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv8i8_nxv32i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv8i8_nxv32i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = 
tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv32i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv1i32(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv1i32(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv8i8_nxv1i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv8i8_nxv1i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv1i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i16(i8*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16(,,,,, i8*, , , i32) @@ -25207,25 +3799,22 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg5_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i8(i8*, , i32) @@ -25244,25 +3833,22 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg5_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: 
vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,,,} %0, 1 + ret %1 } declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i32(i8*, , i32) @@ -25281,589 +3867,23 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg5_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv1r.v v11, v7 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg5ei32.v v7, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv64i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv64i8(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv8i8_nxv64i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv64i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv8i8_nxv64i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv64i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv4i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv4i8(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv8i8_nxv4i8(i8* %base, %index, i32 
%vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv8i8_nxv4i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv4i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv1i16(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv1i16(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv8i8_nxv1i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv8i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv1i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv32i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv32i8(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv8i8_nxv32i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv8i8_nxv32i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: 
vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv32i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv2i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv2i8(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv8i8_nxv2i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv8i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv2i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv16i32(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv16i32(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv8i8_nxv16i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv8i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv16i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv2i16(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv2i16(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv8i8_nxv2i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv2i16(i8* 
%base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv8i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv2i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv4i32(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv4i32(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv8i8_nxv4i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv8i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv4i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv16i16(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv16i16(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv8i8_nxv16i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv8i8_nxv16i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv16i16( %1, 
%1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv1i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv1i8(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv8i8_nxv1i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv8i8_nxv1i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv1i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv16i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv16i8(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv8i8_nxv16i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv8i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv16i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv2i32(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv2i32(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv8i8_nxv2i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv8i8_nxv2i32(i8* %base, %index, i32 %vl, %mask) { 
-; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv2i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv4i16(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv4i16(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv8i8_nxv4i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv8i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv4i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv32i16(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv32i16(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv8i8_nxv32i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv8i8_nxv32i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv32i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = 
extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv1i32(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv1i32(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv8i8_nxv1i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv8i8_nxv1i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv1i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i16(i8*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16(,,,,,, i8*, , , i32) @@ -25880,26 +3900,23 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i8(i8*, , i32) @@ -25918,26 +3935,23 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; 
CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,,,,} %0, 1 + ret %1 } declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i32(i8*, , i32) @@ -25956,605 +3970,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v12, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv64i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv64i8(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv8i8_nxv64i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv64i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv8i8_nxv64i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv64i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv4i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv4i8(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv8i8_nxv4i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: 
vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv8i8_nxv4i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv4i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv1i16(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv1i16(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv8i8_nxv1i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv8i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv1i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv32i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv32i8(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv8i8_nxv32i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv8i8_nxv32i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call 
{,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv32i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv2i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv2i8(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv8i8_nxv2i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv8i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv2i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv16i32(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv16i32(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv8i8_nxv16i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv8i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv16i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv2i16(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv2i16(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv8i8_nxv2i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} 
@llvm.riscv.vloxseg6.nxv8i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv8i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv2i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv4i32(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv4i32(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv8i8_nxv4i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv8i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv4i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv16i16(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv16i16(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv8i8_nxv16i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv8i8_nxv16i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.nxv8i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv16i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv1i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv1i8(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv8i8_nxv1i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv8i8_nxv1i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv16i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv16i8(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv8i8_nxv16i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv8i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv16i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv2i32(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv2i32(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv8i8_nxv2i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; 
CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv8i8_nxv2i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv4i16(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv4i16(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv8i8_nxv4i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv8i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv32i16(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv32i16(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv8i8_nxv32i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv8i8_nxv32i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli 
a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv32i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv1i32(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv1i32(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv8i8_nxv1i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv8i8_nxv1i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i16(i8*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16(,,,,,,, i8*, , , i32) @@ -26571,11 +4005,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -26583,15 +4016,13 @@ define @test_vloxseg7_mask_nxv8i8_nxv8i16(i8* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i8(i8*, , i32) @@ -26610,11 +4041,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v 
v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -26622,15 +4052,13 @@ define @test_vloxseg7_mask_nxv8i8_nxv8i8(i8* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 1 + ret %1 } declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i32(i8*, , i32) @@ -26649,11 +4077,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -26661,609 +4088,15 @@ define @test_vloxseg7_mask_nxv8i8_nxv8i32(i8* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv64i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv64i8(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv8i8_nxv64i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv64i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv8i8_nxv64i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv64i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv4i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv4i8(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv8i8_nxv4i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = 
tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv8i8_nxv4i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv1i16(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv1i16(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv8i8_nxv1i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv8i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv32i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv32i8(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv8i8_nxv32i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv8i8_nxv32i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; 
CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv32i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv2i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv2i8(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv8i8_nxv2i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv8i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv16i32(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv16i32(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv8i8_nxv16i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv8i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv16i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv2i16(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv2i16(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv8i8_nxv2i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv8i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv4i32(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv4i32(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv8i8_nxv4i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv8i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv4i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv16i16(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv16i16(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv8i8_nxv16i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv8i8_nxv16i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: 
vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv1i8(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv1i8(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv8i8_nxv1i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv8i8_nxv1i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv16i8(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv16i8(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv8i8_nxv16i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv8i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue 
{,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv2i32(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv2i32(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv8i8_nxv2i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv8i8_nxv2i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv4i16(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv4i16(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv8i8_nxv4i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv8i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv32i16(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv32i16(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv8i8_nxv32i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv32i16(i8* %base, 
%index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv8i8_nxv32i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv1i32(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv1i32(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv8i8_nxv1i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv8i8_nxv1i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i16(i8*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16(,,,,,,,, i8*, , , i32) @@ -27280,28 +4113,25 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; 
CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i8(i8*, , i32) @@ -27320,28 +4150,25 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i32(i8*, , i32) @@ -27360,51 +4187,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv64i8(i8*, , i32) -declare {,,,,,,,} 
@llvm.riscv.vloxseg8.mask.nxv8i8.nxv64i8(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv8i8_nxv64i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv8i8_nxv64i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8 +; CHECK-NEXT: vmv1r.v v16, v8 ; CHECK-NEXT: vmv1r.v v17, v16 ; CHECK-NEXT: vmv1r.v v18, v16 ; CHECK-NEXT: vmv1r.v v19, v16 @@ -27413,535 +4199,15 @@ define @test_vloxseg8_mask_nxv8i8_nxv64i8(i8* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv4i8(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv4i8(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv8i8_nxv4i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv4i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv8i8_nxv4i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv1i16(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv1i16(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv8i8_nxv1i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv8i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg8_mask_nxv8i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv32i8(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv32i8(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv8i8_nxv32i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv8i8_nxv32i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv2i8(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv2i8(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv8i8_nxv2i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv8i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} 
@llvm.riscv.vloxseg8.nxv8i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv16i32(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv16i32(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv8i8_nxv16i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv8i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv2i16(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv2i16(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv8i8_nxv2i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv8i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv4i32(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv4i32(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv8i8_nxv4i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: 
test_vloxseg8_nxv8i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv8i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv16i16(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv16i16(,, i32*, , , i32) - -define @test_vloxseg2_nxv8i32_nxv16i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv16i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i32_nxv16i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv16i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv16i16( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv1i8(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv1i8(,, i32*, , , i32) - -define @test_vloxseg2_nxv8i32_nxv1i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv1i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i32_nxv1i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv8i32.nxv1i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv1i8( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv16i8(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv16i8(,, i32*, , , i32) - -define @test_vloxseg2_nxv8i32_nxv16i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv16i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i32_nxv16i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv16i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv16i8( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv2i32(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv2i32(,, i32*, , , i32) - -define @test_vloxseg2_nxv8i32_nxv2i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv2i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i32_nxv2i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv2i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv2i32( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv4i16(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv4i16(,, i32*, , , i32) - -define @test_vloxseg2_nxv8i32_nxv4i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv4i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i32_nxv4i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: 
vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv4i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv4i16( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv32i16(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv32i16(,, i32*, , , i32) - -define @test_vloxseg2_nxv8i32_nxv32i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv32i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i32_nxv32i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv32i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv32i16( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv1i32(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv1i32(,, i32*, , , i32) - -define @test_vloxseg2_nxv8i32_nxv1i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv1i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i32_nxv1i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv1i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv1i32( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i16(i32*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16(,, i32*, , , i32) @@ -27958,22 +4224,18 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv8i32_nxv8i16(i32* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv8i32_nxv8i16( %val, i32* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, 
+; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vloxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
+; CHECK-NEXT:    vloxseg2ei16.v v4, (a0), v12, v0.t
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.nxv8i32.nxv8i16(i32* %base, <vscale x 8 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
-  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
-  ret <vscale x 8 x i32> %3
+  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
+  ret <vscale x 8 x i32> %1
 }
 
 declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.nxv8i32.nxv8i8(i32*, <vscale x 8 x i8>, i32)
@@ -27992,22 +4254,18 @@ entry:
   ret <vscale x 8 x i32> %1
 }
 
-define <vscale x 8 x i32> @test_vloxseg2_mask_nxv8i32_nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i32> @test_vloxseg2_mask_nxv8i32_nxv8i8(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vloxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
+; CHECK-NEXT:    vloxseg2ei8.v v4, (a0), v12, v0.t
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.nxv8i32.nxv8i8(i32* %base, <vscale x 8 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
-  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
-  ret <vscale x 8 x i32> %3
+  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
+  ret <vscale x 8 x i32> %1
 }
 
 declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.nxv8i32.nxv8i32(i32*, <vscale x 8 x i32>, i32)
@@ -28026,432 +4284,20 @@ entry:
   ret <vscale x 8 x i32> %1
 }
 
-define <vscale x 8 x i32> @test_vloxseg2_mask_nxv8i32_nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
+define <vscale x 8 x i32> @test_vloxseg2_mask_nxv8i32_nxv8i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vloxseg2ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
+; CHECK-NEXT:    vloxseg2ei32.v v4, (a0), v12, v0.t
+; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.nxv8i32.nxv8i32(i32* %base, <vscale x 8 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
-  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
-  ret <vscale x 8 x i32> %3
-}
-
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.nxv8i32.nxv64i8(i32*, <vscale x 64 x i8>, i32)
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv64i8(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 64 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i32> @test_vloxseg2_nxv8i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.nxv8i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
   ret <vscale x 8 x i32> %1
 }
 
-define <vscale x 8 x i32> @test_vloxseg2_mask_nxv8i32_nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv4r.v v20, v16
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vloxseg2ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v20
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.nxv8i32.nxv64i8(i32* %base, <vscale x 64 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
-  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv64i8(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 64 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
-  ret <vscale x 8 x i32> %3
-}
-
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.nxv8i32.nxv4i8(i32*, <vscale x 4 x i8>, i32)
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv4i8(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 4 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i32> @test_vloxseg2_nxv8i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.nxv8i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
-  ret <vscale x 8 x i32> %1
-}
-
-define <vscale x 8 x i32> @test_vloxseg2_mask_nxv8i32_nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vloxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.nxv8i32.nxv4i8(i32* %base, <vscale x 4 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
-  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv4i8(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 4 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
-  ret <vscale x 8 x i32> %3
-}
-
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.nxv8i32.nxv1i16(i32*, <vscale x 1 x i16>, i32)
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv1i16(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 1 x i16>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i32> @test_vloxseg2_nxv8i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v4
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.nxv8i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
-  ret <vscale x 8 x i32> %1
-}
-
-define <vscale x 8 x i32> @test_vloxseg2_mask_nxv8i32_nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT:    vloxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv4r.v v16, v12
-; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT:    vloxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.nxv8i32.nxv1i16(i32* %base, <vscale x 1 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
-  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv1i16(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 1 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
-  ret <vscale x 8 x i32> %3
-}
-
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.nxv8i32.nxv32i8(i32*, <vscale x 32 x i8>, i32)
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv32i8(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 32 x i8>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i32> @test_vloxseg2_nxv8i32_nxv32i8(i32* %base, <vscale x 32 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1,
a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv32i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i32_nxv32i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv32i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv32i8( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv2i8(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv2i8(,, i32*, , , i32) - -define @test_vloxseg2_nxv8i32_nxv2i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv2i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i32_nxv2i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv2i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv2i8( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv16i32(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv16i32(,, i32*, , , i32) - -define @test_vloxseg2_nxv8i32_nxv16i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv16i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i32_nxv16i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv16i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv16i32( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv2i16(i32*, , i32) -declare {,} 
@llvm.riscv.vloxseg2.mask.nxv8i32.nxv2i16(,, i32*, , , i32) - -define @test_vloxseg2_nxv8i32_nxv2i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv2i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i32_nxv2i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv2i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv2i16( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8i32.nxv4i32(i32*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv4i32(,, i32*, , , i32) - -define @test_vloxseg2_nxv8i32_nxv4i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv4i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8i32_nxv4i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8i32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv4i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv4i32( %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv16i16(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv16i16(,, i8*, , , i32) - -define @test_vloxseg2_nxv4i8_nxv16i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i8_nxv16i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = 
tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv16i16( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv1i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv1i8(,, i8*, , , i32) - -define @test_vloxseg2_nxv4i8_nxv1i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i8_nxv1i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv1i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv16i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv16i8(,, i8*, , , i32) - -define @test_vloxseg2_nxv4i8_nxv16i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv16i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv2i32(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv2i32(,, i8*, , , i32) - -define @test_vloxseg2_nxv4i8_nxv2i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i8_nxv2i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, 
v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv2i32( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i16(i8*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16(,, i8*, , , i32) @@ -28468,228 +4314,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv32i16(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv32i16(,, i8*, , , i32) - -define @test_vloxseg2_nxv4i8_nxv32i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv32i16(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16( %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4i8_nxv32i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv32i16( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv1i32(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv1i32(,, i8*, , , i32) - -define @test_vloxseg2_nxv4i8_nxv1i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i8_nxv1i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; 
CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv1i32( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv8i16(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv8i16(,, i8*, , , i32) - -define @test_vloxseg2_nxv4i8_nxv8i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i8_nxv8i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv8i16( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv8i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv8i8(,, i8*, , , i32) - -define @test_vloxseg2_nxv4i8_nxv8i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i8_nxv8i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv8i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv8i32(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv8i32(,, i8*, , , i32) - -define @test_vloxseg2_nxv4i8_nxv8i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i8_nxv8i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg2_mask_nxv4i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv8i32( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv64i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv64i8(,, i8*, , , i32) - -define @test_vloxseg2_nxv4i8_nxv64i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i8_nxv64i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv64i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i8(i8*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8(,, i8*, , , i32) @@ -28706,194 +4344,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv1i16(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv1i16(,, i8*, , , i32) - -define @test_vloxseg2_nxv4i8_nxv1i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv1i16(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8( %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } 
-define @test_vloxseg2_mask_nxv4i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv1i16( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv32i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv32i8(,, i8*, , , i32) - -define @test_vloxseg2_nxv4i8_nxv32i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i8_nxv32i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv32i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv2i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv2i8(,, i8*, , , i32) - -define @test_vloxseg2_nxv4i8_nxv2i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv2i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv16i32(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv16i32(,, i8*, , , i32) - -define @test_vloxseg2_nxv4i8_nxv16i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 
-; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv16i32( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv2i16(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv2i16(,, i8*, , , i32) - -define @test_vloxseg2_nxv4i8_nxv2i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv2i16( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i32(i8*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32(,, i8*, , , i32) @@ -28910,164 +4374,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv16i16(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv16i16(,,, i8*, , , i32) - -define @test_vloxseg3_nxv4i8_nxv16i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; 
CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32( %val, %val, i8* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv4i8_nxv16i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv16i16( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv1i8(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv1i8(,,, i8*, , , i32) - -define @test_vloxseg3_nxv4i8_nxv1i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i8_nxv1i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv1i8( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv16i8(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv16i8(,,, i8*, , , i32) - -define @test_vloxseg3_nxv4i8_nxv16i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv16i8( %1, %1, %1, i8* %base, %index, %mask, i32 
%vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv2i32(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv2i32(,,, i8*, , , i32) - -define @test_vloxseg3_nxv4i8_nxv2i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i8_nxv2i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv2i32( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i16(i8*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16(,,, i8*, , , i32) @@ -29084,235 +4404,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv32i16(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv32i16(,,, i8*, , , i32) - -define @test_vloxseg3_nxv4i8_nxv32i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv32i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv4i8_nxv32i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} 
@llvm.riscv.vloxseg3.nxv4i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv32i16( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv1i32(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv1i32(,,, i8*, , , i32) - -define @test_vloxseg3_nxv4i8_nxv1i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i8_nxv1i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv1i32( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv8i16(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv8i16(,,, i8*, , , i32) - -define @test_vloxseg3_nxv4i8_nxv8i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i8_nxv8i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv8i16( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv8i8(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv8i8(,,, i8*, , , i32) - -define @test_vloxseg3_nxv4i8_nxv8i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i8_nxv8i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, 
e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv8i8( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv8i32(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv8i32(,,, i8*, , , i32) - -define @test_vloxseg3_nxv4i8_nxv8i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i8_nxv8i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv8i32( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv64i8(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv64i8(,,, i8*, , , i32) - -define @test_vloxseg3_nxv4i8_nxv64i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i8_nxv64i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv64i8( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i8(i8*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8(,,, i8*, , , i32) @@ -29329,200 +4436,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, 
e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv1i16(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv1i16(,,, i8*, , , i32) - -define @test_vloxseg3_nxv4i8_nxv1i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv1i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv4i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv1i16( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv32i8(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv32i8(,,, i8*, , , i32) - -define @test_vloxseg3_nxv4i8_nxv32i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i8_nxv32i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv32i8( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv2i8(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv2i8(,,, i8*, , , i32) - -define @test_vloxseg3_nxv4i8_nxv2i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv2i8: -; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv2i8( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv16i32(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv16i32(,,, i8*, , , i32) - -define @test_vloxseg3_nxv4i8_nxv16i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv16i32( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv2i16(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv2i16(,,, i8*, , , i32) - -define @test_vloxseg3_nxv4i8_nxv2i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv2i16( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 
1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i32(i8*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32(,,, i8*, , , i32) @@ -29539,169 +4468,21 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv16i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv16i16(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv4i8_nxv16i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv4i8_nxv16i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv16i16( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv1i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv1i8(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv4i8_nxv1i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i8_nxv1i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: 
vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv1i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv16i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv16i8(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv4i8_nxv16i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv16i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv2i32(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv2i32(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv4i8_nxv2i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i8_nxv2i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv2i32( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i16(i8*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16(,,,, i8*, , , i32) @@ -29718,242 +4499,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: 
vloxseg4ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv32i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv32i16(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv4i8_nxv32i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv32i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv4i8_nxv32i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv32i16( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv1i32(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv1i32(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv4i8_nxv1i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i8_nxv1i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv1i32( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv8i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv8i16(,,,, i8*, , , i32) - -define 
@test_vloxseg4_nxv4i8_nxv8i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i8_nxv8i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv8i16( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv8i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv8i8(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv4i8_nxv8i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i8_nxv8i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv8i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv8i32(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv8i32(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv4i8_nxv8i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i8_nxv8i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call 
{,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv8i32( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv64i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv64i8(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv4i8_nxv64i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i8_nxv64i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv64i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i8(i8*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8(,,,, i8*, , , i32) @@ -29970,206 +4532,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv1i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv1i16(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv4i8_nxv1i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv1i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv4i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, 
e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv1i16( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv32i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv32i8(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv4i8_nxv32i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i8_nxv32i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv32i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv2i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv2i8(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv4i8_nxv2i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv2i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv16i32(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv16i32(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv4i8_nxv16i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; 
CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv16i32( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv2i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv2i16(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv4i8_nxv2i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv2i16( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i32(i8*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32(,,,, i8*, , , i32) @@ -30186,174 +4565,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv16i16(i8*, , i32) -declare {,,,,} 
@llvm.riscv.vloxseg5.mask.nxv4i8.nxv16i16(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv4i8_nxv16i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv4i8_nxv16i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv16i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv1i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv1i8(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv4i8_nxv1i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i8_nxv1i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv1i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv16i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv16i8(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv4i8_nxv16i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, 
a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv16i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv2i32(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv2i32(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv4i8_nxv2i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i8_nxv2i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv2i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i16(i8*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16(,,,,, i8*, , , i32) @@ -30370,249 +4598,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg5_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv32i16(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv32i16(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv4i8_nxv32i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret 
-entry:
- %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.nxv4i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
+ %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
 ret <vscale x 4 x i8> %1
 }
 
-define <vscale x 4 x i8> @test_vloxseg5_mask_nxv4i8_nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.nxv4i8.nxv32i16(i8* %base, <vscale x 32 x i16> %index, i32 %vl)
- %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
- %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv32i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
- %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
- ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.nxv4i8.nxv1i32(i8*, <vscale x 1 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv1i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 1 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vloxseg5_nxv4i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.nxv4i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
- %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
- ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vloxseg5_mask_nxv4i8_nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.nxv4i8.nxv1i32(i8* %base, <vscale x 1 x i32> %index, i32 %vl)
- %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
- %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv1i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
- %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
- ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.nxv4i8.nxv8i16(i8*, <vscale x 8 x i16>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv8i16(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i16>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vloxseg5_nxv4i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.nxv4i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
- %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
- ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vloxseg5_mask_nxv4i8_nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.nxv4i8.nxv8i16(i8* %base, <vscale x 8 x i16> %index, i32 %vl)
- %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
- %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv8i16(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl)
- %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
- ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.nxv4i8.nxv8i8(i8*, <vscale x 8 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv8i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vloxseg5_nxv4i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.nxv4i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
- %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
- ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vloxseg5_mask_nxv4i8_nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.nxv4i8.nxv8i8(i8* %base, <vscale x 8 x i8> %index, i32 %vl)
- %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
- %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv8i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl)
- %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
- ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.nxv4i8.nxv8i32(i8*, <vscale x 8 x i32>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv8i32(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 8 x i32>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vloxseg5_nxv4i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.nxv4i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
- %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
- ret <vscale x 4 x i8> %1
-}
-
-define <vscale x 4 x i8> @test_vloxseg5_mask_nxv4i8_nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.nxv4i8.nxv8i32(i8* %base, <vscale x 8 x i32> %index, i32 %vl)
- %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
- %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv8i32(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 8 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
- %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
- ret <vscale x 4 x i8> %3
-}
-
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.nxv4i8.nxv64i8(i8*, <vscale x 64 x i8>, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv64i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 64 x i8>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i8> @test_vloxseg5_nxv4i8_nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.nxv4i8.nxv64i8(i8* %base, <vscale x 64 x i8> %index, i32 %vl)
- %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
- ret <vscale x 4 x i8> %1
-}
- 
-define @test_vloxseg5_mask_nxv4i8_nxv64i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv64i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i8(i8*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8(,,,,, i8*, , , i32) @@ -30629,212 +4632,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg5_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv1i16(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv1i16(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv4i8_nxv1i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv1i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv4i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv1i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv32i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv32i8(,,,,, 
i8*, , , i32) - -define @test_vloxseg5_nxv4i8_nxv32i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i8_nxv32i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv32i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv2i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv2i8(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv4i8_nxv2i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv2i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv16i32(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv16i32(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv4i8_nxv16i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: 
vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv16i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv2i16(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv2i16(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv4i8_nxv2i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv2i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i32(i8*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32(,,,,, i8*, , , i32) @@ -30851,179 +4666,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg5_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv16i16(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv16i16(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv4i8_nxv16i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call {,,,,} 
@llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv4i8_nxv16i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv16i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv1i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv1i8(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv4i8_nxv1i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i8_nxv1i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv1i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv16i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv16i8(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv4i8_nxv16i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} 
@llvm.riscv.vloxseg6.nxv4i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv16i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv2i32(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv2i32(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv4i8_nxv2i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i8_nxv2i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv2i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i16(i8*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16(,,,,,, i8*, , , i32) @@ -31040,256 +4700,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv32i16(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv32i16(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv4i8_nxv32i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv32i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue 
{,,,,,} %0, 1
 ret %1
 }
 
-define @test_vloxseg6_mask_nxv4i8_nxv32i16(i8* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv32i16(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv32i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv1i32(i8*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv1i32(,,,,,, i8*, , , i32)
-
-define @test_vloxseg6_nxv4i8_nxv1i32(i8* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv1i32(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv4i8_nxv1i32(i8* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv1i32(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv1i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv8i16(i8*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv8i16(,,,,,, i8*, , , i32)
-
-define @test_vloxseg6_nxv4i8_nxv8i16(i8* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv8i16(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv4i8_nxv8i16(i8* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv8i16(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv8i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv8i8(i8*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv8i8(,,,,,, i8*, , , i32)
-
-define @test_vloxseg6_nxv4i8_nxv8i8(i8* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv8i8(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv4i8_nxv8i8(i8* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv8i8(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv8i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv8i32(i8*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv8i32(,,,,,, i8*, , , i32)
-
-define @test_vloxseg6_nxv4i8_nxv8i32(i8* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv8i32(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv4i8_nxv8i32(i8* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv8i32(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv8i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv64i8(i8*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv64i8(,,,,,, i8*, , , i32)
-
-define @test_vloxseg6_nxv4i8_nxv64i8(i8* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv64i8(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv4i8_nxv64i8(i8* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv64i8(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv64i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
 declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i8(i8*, , i32)
 declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8(,,,,,, i8*, , , i32)
 
@@ -31306,218 +4735,25 @@ entry:
 ret %1
 }
 
-define @test_vloxseg6_mask_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg6_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vmv1r.v v5, v1
 ; CHECK-NEXT: vmv1r.v v6, v1
 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv1i16(i8*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv1i16(,,,,,, i8*, , , i32)
-
-define @test_vloxseg6_nxv4i8_nxv1i16(i8* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv1i16(i8* %base, %index, i32 %vl)
+ %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,,,} %0, 1
 ret %1
 }
 
-define @test_vloxseg6_mask_nxv4i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv1i16(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv1i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv32i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv32i8(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv4i8_nxv32i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i8_nxv32i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv32i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv2i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv2i8(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv4i8_nxv2i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv2i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv16i32(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv16i32(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv4i8_nxv16i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv16i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv2i16(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv2i16(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv4i8_nxv2i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv2i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i32(i8*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32(,,,,,, i8*, , , i32) @@ -31534,184 +4770,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv16i16(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv16i16(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv4i8_nxv16i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv16i16: -; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv4i8_nxv16i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv16i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv1i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv1i8(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv4i8_nxv1i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i8_nxv1i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv16i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv16i8(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv4i8_nxv16i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv16i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv2i32(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv2i32(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv4i8_nxv2i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i8_nxv2i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i16(i8*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16(,,,,,,, i8*, , , i32) @@ -31728,11 +4805,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -31740,251 +4816,15 @@ define @test_vloxseg7_mask_nxv4i8_nxv4i16(i8* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv32i16(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv32i16(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv4i8_nxv32i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: 
vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv32i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv4i8_nxv32i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv32i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv1i32(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv1i32(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv4i8_nxv1i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i8_nxv1i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv8i16(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv8i16(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv4i8_nxv8i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i8_nxv8i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: 
vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv8i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv8i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv8i8(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv4i8_nxv8i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i8_nxv8i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv8i32(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv8i32(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv4i8_nxv8i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i8_nxv8i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv8i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv64i8(i8*, , i32) -declare {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv4i8.nxv64i8(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv4i8_nxv64i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i8_nxv64i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv64i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i8(i8*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8(,,,,,,, i8*, , , i32) @@ -32001,11 +4841,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -32013,212 +4852,15 @@ define @test_vloxseg7_mask_nxv4i8_nxv4i8(i8* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv1i16(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv1i16(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv4i8_nxv1i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv1i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv4i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: 
vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv32i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv32i8(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv4i8_nxv32i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i8_nxv32i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv32i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv2i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv2i8(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv4i8_nxv2i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv16i32(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv16i32(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv4i8_nxv16i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: 
test_vloxseg7_nxv4i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv16i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv2i16(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv2i16(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv4i8_nxv2i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i32(i8*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32(,,,,,,, i8*, , , i32) @@ -32235,11 +4877,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -32247,177 +4888,15 @@ define @test_vloxseg7_mask_nxv4i8_nxv4i32(i8* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv16i16(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv16i16(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv4i8_nxv16i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv4i8_nxv16i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv1i8(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv1i8(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv4i8_nxv1i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i8_nxv1i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv16i8(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv16i8(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv4i8_nxv16i8(i8* %base, 
%index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv2i32(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv2i32(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv4i8_nxv2i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i8_nxv2i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i16(i8*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16(,,,,,,,, i8*, , , i32) @@ -32434,131 +4913,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv32i16(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv32i16(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv4i8_nxv32i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i8_nxv32i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv1i32(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv1i32(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv4i8_nxv1i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i8_nxv1i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv8i16(i8*, 
, i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv8i16(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv4i8_nxv8i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i8_nxv8i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -32567,137 +4925,15 @@ define @test_vloxseg8_mask_nxv4i8_nxv8i16(i8* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv8i8(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv8i8(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv4i8_nxv8i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv8i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv4i8_nxv8i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv8i32(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv8i32(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv4i8_nxv8i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i8_nxv8i32(i8* %base, %index, i32 %vl, 
%mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv64i8(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv64i8(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv4i8_nxv64i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4i8_nxv64i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i8(i8*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8(,,,,,,,, i8*, , , i32) @@ -32714,230 +4950,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: 
vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i8(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv1i16(i8*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv1i16(,,,,,,,, i8*, , , i32)
-
-define @test_vloxseg8_nxv4i8_nxv1i16(i8* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv1i16(i8* %base, %index, i32 %vl)
+ %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,,,,,} %0, 1
 ret %1
 }

-define @test_vloxseg8_mask_nxv4i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v11, v9
-; CHECK-NEXT: vmv1r.v v12, v9
-; CHECK-NEXT: vmv1r.v v13, v9
-; CHECK-NEXT: vmv1r.v v14, v9
-; CHECK-NEXT: vmv1r.v v15, v9
-; CHECK-NEXT: vmv1r.v v16, v9
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv1i16(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv32i8(i8*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv32i8(,,,,,,,, i8*, , , i32)
-
-define @test_vloxseg8_nxv4i8_nxv32i8(i8* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv32i8(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv4i8_nxv32i8(i8* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv32i8(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv2i8(i8*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv2i8(,,,,,,,, i8*, , , i32)
-
-define @test_vloxseg8_nxv4i8_nxv2i8(i8* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv2i8(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv4i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v11, v9
-; CHECK-NEXT: vmv1r.v v12, v9
-; CHECK-NEXT: vmv1r.v v13, v9
-; CHECK-NEXT: vmv1r.v v14, v9
-; CHECK-NEXT: vmv1r.v v15, v9
-; CHECK-NEXT: vmv1r.v v16, v9
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv2i8(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv16i32(i8*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv16i32(,,,,,,,, i8*, , , i32)
-
-define @test_vloxseg8_nxv4i8_nxv16i32(i8* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv16i32(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv4i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v17
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv16i32(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv2i16(i8*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv2i16(,,,,,,,, i8*, , , i32)
-
-define @test_vloxseg8_nxv4i8_nxv2i16(i8* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv2i16(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv4i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v11, v9
-; CHECK-NEXT: vmv1r.v v12, v9
-; CHECK-NEXT: vmv1r.v v13, v9
-; CHECK-NEXT: vmv1r.v v14, v9
-; CHECK-NEXT: vmv1r.v v15, v9
-; CHECK-NEXT: vmv1r.v v16, v9
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv2i16(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
 declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i32(i8*, , i32)
 declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32(,,,,,,,, i8*, , , i32)

@@ -32954,62 +4987,25 @@ entry:
 ret %1
 }

-define @test_vloxseg8_mask_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg8_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v12
+; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v19, v12
 ; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
-; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v13
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i8.nxv4i32(i8* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv16i16(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv16i16(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv1i16_nxv16i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv16i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv1i16_nxv16i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv16i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv16i16( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
+ %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 1
+ ret %1
 }

 declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i8(i16*, , i32)
 declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8(,, i16*, , , i32)

@@ -33028,160 +5024,20 @@ entry:
 ret %1
 }

-define @test_vloxseg2_mask_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg2_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v7, v8
 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv16i8(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv16i8(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv1i16_nxv16i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv16i8(i16* %base, %index, i32 %vl)
+ %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8( %val, %val, i16* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,} %0, 1
 ret %1
 }

-define @test_vloxseg2_mask_nxv1i16_nxv16i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv16i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv16i8( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv2i32(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv2i32(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv1i16_nxv2i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv2i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv1i16_nxv2i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv2i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv2i32( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv4i16(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv4i16(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv1i16_nxv4i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv4i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv1i16_nxv4i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv4i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv4i16( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv32i16(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv32i16(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv1i16_nxv32i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv32i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv1i16_nxv32i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv32i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv32i16( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
 declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i32(i16*, , i32)
 declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32(,, i16*, , , i32)

@@ -33198,194 +5054,20 @@ entry:
 ret %1
 }

-define @test_vloxseg2_mask_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg2_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v7, v8
 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv8i16(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv8i16(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv1i16_nxv8i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv8i16(i16* %base, %index, i32 %vl)
+ %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32( %val, %val, i16* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,} %0, 1
 ret %1
 }

-define @test_vloxseg2_mask_nxv1i16_nxv8i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv8i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv8i16( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv8i8(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv8i8(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv1i16_nxv8i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv8i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv1i16_nxv8i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv8i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv8i8( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv8i32(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv8i32(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv1i16_nxv8i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv8i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv1i16_nxv8i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv8i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv8i32( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv64i8(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv64i8(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv1i16_nxv64i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv64i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv1i16_nxv64i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv64i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv64i8( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv4i8(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv4i8(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv1i16_nxv4i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv4i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv1i16_nxv4i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv4i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv4i8( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
 declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i16(i16*, , i32)
 declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16(,, i16*, , , i32)

@@ -33402,229 +5084,20 @@ entry:
 ret %1
 }

-define @test_vloxseg2_mask_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg2_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v7, v8
 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv32i8(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv32i8(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv1i16_nxv32i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv32i8(i16* %base, %index, i32 %vl)
+ %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16( %val, %val, i16* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,} %0, 1
 ret %1
 }

-define @test_vloxseg2_mask_nxv1i16_nxv32i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv32i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv32i8( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv2i8(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv2i8(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv1i16_nxv2i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv2i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv1i16_nxv2i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv2i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv2i8( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv16i32(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv16i32(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv1i16_nxv16i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv16i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv1i16_nxv16i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv16i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv16i32( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv2i16(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv2i16(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv1i16_nxv2i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv2i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv1i16_nxv2i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv2i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv2i16( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1i16.nxv4i32(i16*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv4i32(,, i16*, , , i32)
-
-define @test_vloxseg2_nxv1i16_nxv4i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv4i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv1i16_nxv4i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1i16_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i16.nxv4i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv4i32( %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv16i16(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv16i16(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv1i16_nxv16i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv16i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv1i16_nxv16i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv16i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv16i16( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
 declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i8(i16*, , i32)
 declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8(,,, i16*, , , i32)

@@ -33641,165 +5114,22 @@ entry:
 ret %1
 }

-define @test_vloxseg3_mask_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg3_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv16i8(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv16i8(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv1i16_nxv16i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv16i8(i16* %base, %index, i32 %vl)
+ %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,} %0, 1
 ret %1
 }

-define @test_vloxseg3_mask_nxv1i16_nxv16i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv16i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv16i8( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv2i32(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv2i32(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv1i16_nxv2i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv2i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv1i16_nxv2i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv2i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv2i32( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv4i16(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv4i16(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv1i16_nxv4i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv4i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv1i16_nxv4i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv4i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv4i16( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv32i16(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv32i16(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv1i16_nxv32i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv32i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv1i16_nxv32i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv32i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv32i16( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
 declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i32(i16*, , i32)
 declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32(,,, i16*, , , i32)

@@ -33816,200 +5146,22 @@ entry:
 ret %1
 }

-define @test_vloxseg3_mask_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg3_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv8i16(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv8i16(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv1i16_nxv8i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv8i16(i16* %base, %index, i32 %vl)
+ %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,} %0, 1
 ret %1
 }

-define @test_vloxseg3_mask_nxv1i16_nxv8i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv8i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv8i16( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv8i8(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv8i8(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv1i16_nxv8i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv8i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv1i16_nxv8i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv8i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv8i8( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv8i32(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv8i32(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv1i16_nxv8i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv8i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv1i16_nxv8i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv8i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv8i32( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv64i8(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv64i8(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv1i16_nxv64i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv64i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv1i16_nxv64i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv64i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv64i8( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv4i8(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv4i8(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv1i16_nxv4i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv4i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv1i16_nxv4i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv4i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv4i8( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
 declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i16(i16*, , i32)
 declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16(,,, i16*, , , i32)

@@ -34026,236 +5178,22 @@ entry:
 ret %1
 }

-define @test_vloxseg3_mask_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg3_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv32i8(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv32i8(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv1i16_nxv32i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv32i8(i16* %base, %index, i32 %vl)
+ %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,} %0, 1
 ret %1
 }

-define @test_vloxseg3_mask_nxv1i16_nxv32i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv32i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv32i8( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv2i8(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv2i8(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv1i16_nxv2i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv2i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv1i16_nxv2i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv2i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv2i8( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv16i32(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv16i32(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv1i16_nxv16i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv16i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv1i16_nxv16i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv16i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv16i32( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv2i16(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv2i16(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv1i16_nxv2i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv2i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv1i16_nxv2i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv2i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv2i16( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv4i32(i16*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv4i32(,,, i16*, , , i32)
-
-define @test_vloxseg3_nxv1i16_nxv4i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv4i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv1i16_nxv4i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i16.nxv4i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv4i32( %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv16i16(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv16i16(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv1i16_nxv16i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv16i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv1i16_nxv16i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv16i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv16i16( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
 declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i8(i16*, , i32)
 declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8(,,,, i16*, , , i32)

@@ -34272,170 +5210,23 @@ entry:
 ret %1
 }

-define @test_vloxseg4_mask_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg4_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv16i8(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv16i8(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv1i16_nxv16i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv16i8(i16* %base, %index, i32 %vl)
+ %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,} %0, 1
 ret %1
 }

-define @test_vloxseg4_mask_nxv1i16_nxv16i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv16i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv16i8( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv2i32(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv2i32(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv1i16_nxv2i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv2i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv1i16_nxv2i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv2i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv2i32( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv4i16(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv4i16(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv1i16_nxv4i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv4i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv1i16_nxv4i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv4i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv4i16( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv32i16(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv32i16(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv1i16_nxv32i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv32i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv1i16_nxv32i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv32i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv32i16( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
 declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i32(i16*, , i32)
 declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32(,,,, i16*, , , i32)

@@ -34452,206 +5243,23 @@ entry:
 ret %1
 }

-define @test_vloxseg4_mask_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg4_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv8i16(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv8i16(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv1i16_nxv8i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv8i16(i16* %base, %index, i32 %vl)
+ %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,} %0, 1
 ret %1
 }

-define @test_vloxseg4_mask_nxv1i16_nxv8i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv8i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv8i16( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv8i8(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv8i8(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv1i16_nxv8i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv8i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv1i16_nxv8i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv8i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv8i8( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv8i32(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv8i32(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv1i16_nxv8i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv8i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv1i16_nxv8i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv8i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv8i32( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv64i8(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv64i8(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv1i16_nxv64i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv64i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv1i16_nxv64i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv64i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv64i8( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv4i8(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv4i8(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv1i16_nxv4i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv4i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv1i16_nxv4i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv4i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv4i8( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
 declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i16(i16*, , i32)
 declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16(,,,, i16*, , , i32)

@@ -34668,243 +5276,23 @@ entry:
 ret %1
 }

-define @test_vloxseg4_mask_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg4_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv32i8(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv32i8(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv1i16_nxv32i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv32i8(i16* %base, %index, i32 %vl)
+ %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,} %0, 1
 ret %1
 }

-define @test_vloxseg4_mask_nxv1i16_nxv32i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1,
(a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv2i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv2i8( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv16i32(i16*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv16i32(,,,, i16*, , , i32) - -define @test_vloxseg4_nxv1i16_nxv16i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv16i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i16_nxv16i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv16i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv16i32( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv2i16(i16*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv2i16(,,,, i16*, , , i32) - -define @test_vloxseg4_nxv1i16_nxv2i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv2i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i16_nxv2i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv2i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv2i16( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv4i32(i16*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv4i32(,,,, i16*, , , i32) - -define @test_vloxseg4_nxv1i16_nxv4i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, 
e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv4i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1i16_nxv4i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i16.nxv4i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1i16.nxv4i32( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv16i16(i16*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv16i16(,,,,, i16*, , , i32) - -define @test_vloxseg5_nxv1i16_nxv16i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv16i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i16_nxv16i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv16i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv16i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i8(i16*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8(,,,,, i16*, , , i32) @@ -34921,175 +5309,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg5_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 
- ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv16i8(i16*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv16i8(,,,,, i16*, , , i32) - -define @test_vloxseg5_nxv1i16_nxv16i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv16i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1i16_nxv16i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv16i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv16i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv2i32(i16*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv2i32(,,,,, i16*, , , i32) - -define @test_vloxseg5_nxv1i16_nxv2i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv2i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i16_nxv2i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv2i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv2i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv4i16(i16*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv4i16(,,,,, i16*, , , i32) - -define @test_vloxseg5_nxv1i16_nxv4i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv4i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i16_nxv4i16(i16* %base, %index, i32 
%vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv4i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv4i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv32i16(i16*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv32i16(,,,,, i16*, , , i32) - -define @test_vloxseg5_nxv1i16_nxv32i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv32i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i16_nxv32i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv32i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv32i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i32(i16*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32(,,,,, i16*, , , i32) @@ -35106,212 +5343,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg5_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv8i16(i16*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv8i16(,,,,, i16*, , , i32) - -define @test_vloxseg5_nxv1i16_nxv8i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: 
test_vloxseg5_nxv1i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv8i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1i16_nxv8i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv8i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv8i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv8i8(i16*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv8i8(,,,,, i16*, , , i32) - -define @test_vloxseg5_nxv1i16_nxv8i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv8i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i16_nxv8i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv8i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv8i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv8i32(i16*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv8i32(,,,,, i16*, , , i32) - -define @test_vloxseg5_nxv1i16_nxv8i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv8i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i16_nxv8i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; 
CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv8i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv8i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv64i8(i16*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv64i8(,,,,, i16*, , , i32) - -define @test_vloxseg5_nxv1i16_nxv64i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv64i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i16_nxv64i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv64i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv64i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv4i8(i16*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv4i8(,,,,, i16*, , , i32) - -define @test_vloxseg5_nxv1i16_nxv4i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv4i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i16_nxv4i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv4i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv4i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i16(i16*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16(,,,,, i16*, , , i32) @@ -35328,250 +5377,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl, %mask) { +define 
@test_vloxseg5_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv32i8(i16*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv32i8(,,,,, i16*, , , i32) - -define @test_vloxseg5_nxv1i16_nxv32i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv32i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1i16_nxv32i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv32i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv32i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv2i8(i16*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv2i8(,,,,, i16*, , , i32) - -define @test_vloxseg5_nxv1i16_nxv2i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv2i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i16_nxv2i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv2i8(i16* %base, 
%index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv2i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv16i32(i16*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv16i32(,,,,, i16*, , , i32) - -define @test_vloxseg5_nxv1i16_nxv16i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv16i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i16_nxv16i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv16i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv16i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv2i16(i16*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv2i16(,,,,, i16*, , , i32) - -define @test_vloxseg5_nxv1i16_nxv2i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv2i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i16_nxv2i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv2i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv2i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv4i32(i16*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv4i32(,,,,, i16*, , , i32) - -define @test_vloxseg5_nxv1i16_nxv4i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv4i32(i16* %base, %index, i32 %vl) - %1 = extractvalue 
{,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1i16_nxv4i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i16.nxv4i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1i16.nxv4i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv16i16(i16*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv16i16(,,,,,, i16*, , , i32) - -define @test_vloxseg6_nxv1i16_nxv16i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv16i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i16_nxv16i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv16i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv16i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i8(i16*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8(,,,,,, i16*, , , i32) @@ -35588,180 +5411,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv16i8(i16*, , i32) -declare {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv1i16.nxv16i8(,,,,,, i16*, , , i32) - -define @test_vloxseg6_nxv1i16_nxv16i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv16i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1i16_nxv16i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv16i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv16i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv2i32(i16*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv2i32(,,,,,, i16*, , , i32) - -define @test_vloxseg6_nxv1i16_nxv2i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv2i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i16_nxv2i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv2i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv2i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv4i16(i16*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv4i16(,,,,,, i16*, , , i32) - -define @test_vloxseg6_nxv1i16_nxv4i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv4i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i16_nxv4i16(i16* %base, %index, i32 
%vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv4i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv4i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv32i16(i16*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv32i16(,,,,,, i16*, , , i32) - -define @test_vloxseg6_nxv1i16_nxv32i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv32i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i16_nxv32i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv32i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv32i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i32(i16*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32(,,,,,, i16*, , , i32) @@ -35778,218 +5446,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv8i16(i16*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv8i16(,,,,,, 
i16*, , , i32) - -define @test_vloxseg6_nxv1i16_nxv8i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv8i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1i16_nxv8i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv8i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv8i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv8i8(i16*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv8i8(,,,,,, i16*, , , i32) - -define @test_vloxseg6_nxv1i16_nxv8i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv8i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i16_nxv8i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv8i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv8i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv8i32(i16*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv8i32(,,,,,, i16*, , , i32) - -define @test_vloxseg6_nxv1i16_nxv8i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv8i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i16_nxv8i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg6_mask_nxv1i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv8i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv8i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv64i8(i16*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv64i8(,,,,,, i16*, , , i32) - -define @test_vloxseg6_nxv1i16_nxv64i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv64i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i16_nxv64i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv64i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv64i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv4i8(i16*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv4i8(,,,,,, i16*, , , i32) - -define @test_vloxseg6_nxv1i16_nxv4i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv4i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i16_nxv4i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv4i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv4i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - 
%3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i16(i16*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16(,,,,,, i16*, , , i32) @@ -36006,257 +5481,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv32i8(i16*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv32i8(,,,,,, i16*, , , i32) - -define @test_vloxseg6_nxv1i16_nxv32i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv32i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1i16_nxv32i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv32i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv32i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv2i8(i16*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv2i8(,,,,,, i16*, , , i32) - -define @test_vloxseg6_nxv1i16_nxv2i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv2i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i16_nxv2i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv2i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv2i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv16i32(i16*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv16i32(,,,,,, i16*, , , i32) - -define @test_vloxseg6_nxv1i16_nxv16i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv16i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i16_nxv16i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv16i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv16i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv2i16(i16*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv2i16(,,,,,, i16*, , , i32) - -define @test_vloxseg6_nxv1i16_nxv2i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv2i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i16_nxv2i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv2i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv2i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare 
{,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv4i32(i16*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv4i32(,,,,,, i16*, , , i32) - -define @test_vloxseg6_nxv1i16_nxv4i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv4i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1i16_nxv4i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i16.nxv4i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1i16.nxv4i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv16i16(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv16i16(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv1i16_nxv16i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv16i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i16_nxv16i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv16i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i8(i16*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8(,,,,,,, i16*, , , i32) @@ -36273,11 +5516,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ 
-36285,173 +5527,15 @@ define @test_vloxseg7_mask_nxv1i16_nxv1i8(i16* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv16i8(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv16i8(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv1i16_nxv16i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv16i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1i16_nxv16i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv16i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv2i32(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv2i32(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv1i16_nxv2i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv2i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i16_nxv2i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv2i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv4i16(i16*, , i32) -declare {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv1i16.nxv4i16(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv1i16_nxv4i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv4i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i16_nxv4i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv4i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv32i16(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv32i16(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv1i16_nxv32i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv32i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i16_nxv32i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv32i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i32(i16*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32(,,,,,,, i16*, , , i32) @@ -36468,11 +5552,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -36480,212 +5563,15 @@ define 
@test_vloxseg7_mask_nxv1i16_nxv1i32(i16* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv8i16(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv8i16(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv1i16_nxv8i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv8i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1i16_nxv8i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv8i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv8i8(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv8i8(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv1i16_nxv8i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv8i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i16_nxv8i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv8i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv8i32(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv8i32(,,,,,,, 
i16*, , , i32) - -define @test_vloxseg7_nxv1i16_nxv8i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv8i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i16_nxv8i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv8i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv64i8(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv64i8(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv1i16_nxv64i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv64i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i16_nxv64i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv64i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv4i8(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv4i8(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv1i16_nxv4i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv4i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i16_nxv4i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv4i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i16(i16*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16(,,,,,,, i16*, , , i32) @@ -36702,11 +5588,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -36714,252 +5599,15 @@ define @test_vloxseg7_mask_nxv1i16_nxv1i16(i16* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv32i8(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv32i8(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv1i16_nxv32i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv32i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1i16_nxv32i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv32i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv2i8(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv2i8(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv1i16_nxv2i8(i16* %base, 
%index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv2i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i16_nxv2i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv2i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv16i32(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv16i32(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv1i16_nxv16i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv16i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i16_nxv16i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv16i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv2i16(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv2i16(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv1i16_nxv2i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv2i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i16_nxv2i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: 
vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv2i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv4i32(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv4i32(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv1i16_nxv4i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv4i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1i16_nxv4i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i16.nxv4i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1i16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv16i16(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv16i16(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv1i16_nxv16i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv16i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i16_nxv16i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv16i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} 
@llvm.riscv.vloxseg8.mask.nxv1i16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i8(i16*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8(,,,,,,,, i16*, , , i32) @@ -36976,51 +5624,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv16i8(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv16i8(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv1i16_nxv16i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv16i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i16_nxv16i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -37029,137 +5636,15 @@ define @test_vloxseg8_mask_nxv1i16_nxv16i8(i16* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv16i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv2i32(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv2i32(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv1i16_nxv2i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv2i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv1i16_nxv2i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg8_mask_nxv1i16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv2i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv4i16(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv4i16(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv1i16_nxv4i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv4i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i16_nxv4i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv4i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv32i16(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv32i16(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv1i16_nxv32i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv32i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i16_nxv32i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v 
v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv32i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i32(i16*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32(,,,,,,,, i16*, , , i32) @@ -37176,51 +5661,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv8i16(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv8i16(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv1i16_nxv8i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv8i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i16_nxv8i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -37229,177 +5673,15 @@ define @test_vloxseg8_mask_nxv1i16_nxv8i16(i16* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv8i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv8i8(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv8i8(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv1i16_nxv8i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv8i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, 
%val, %val, i16* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv1i16_nxv8i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv8i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv8i32(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv8i32(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv1i16_nxv8i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv8i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i16_nxv8i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv8i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv64i8(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv64i8(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv1i16_nxv64i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv64i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i16_nxv64i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 
-; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv64i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv4i8(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv4i8(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv1i16_nxv4i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv4i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i16_nxv4i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv4i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i16(i16*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16(,,,,,,,, i16*, , , i32) @@ -37416,211 +5698,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv1i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv32i8(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv32i8(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv1i16_nxv32i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv32i8: -; CHECK: # %bb.0: 
# %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv32i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i16_nxv32i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv32i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv2i8(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv2i8(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv1i16_nxv2i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv2i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i16_nxv2i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv2i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv16i32(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv16i32(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv1i16_nxv16i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv16i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i16_nxv16i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu 
-; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv16i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv2i16(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv2i16(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv1i16_nxv2i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv2i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i16_nxv2i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv2i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv4i32(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv4i32(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv1i16_nxv4i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv4i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1i16_nxv4i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -37629,185 +5710,13 @@ define @test_vloxseg8_mask_nxv1i16_nxv4i32(i16* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv1i16.nxv4i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 
%vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv16i16(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv16i16(,, i8*, , , i32) - -define @test_vloxseg2_nxv32i8_nxv16i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv16i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv16i16( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv1i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv1i8(,, i8*, , , i32) - -define @test_vloxseg2_nxv32i8_nxv1i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv1i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv1i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv16i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv16i8(,, i8*, , , i32) - -define @test_vloxseg2_nxv32i8_nxv16i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call 
{,} @llvm.riscv.vloxseg2.nxv32i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv16i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv2i32(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv2i32(,, i8*, , , i32) - -define @test_vloxseg2_nxv32i8_nxv2i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv2i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv2i32( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv4i16(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv4i16(,, i8*, , , i32) - -define @test_vloxseg2_nxv32i8_nxv4i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv4i16( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i16(i8*, , i32) @@ -37826,262 +5735,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv32i8_nxv32i16(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv32i8_nxv32i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8, v0.t -; 
CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v16, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv1i32(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv1i32(,, i8*, , , i32) - -define @test_vloxseg2_nxv32i8_nxv1i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv1i32(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16( %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv32i8_nxv1i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv1i32( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv8i16(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv8i16(,, i8*, , , i32) - -define @test_vloxseg2_nxv32i8_nxv8i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv8i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv8i16( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv8i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv8i8(,, i8*, , , i32) - -define @test_vloxseg2_nxv32i8_nxv8i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = 
extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv8i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv8i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv8i32(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv8i32(,, i8*, , , i32) - -define @test_vloxseg2_nxv32i8_nxv8i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv8i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv8i32( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv64i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv64i8(,, i8*, , , i32) - -define @test_vloxseg2_nxv32i8_nxv64i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv64i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv64i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv4i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv4i8(,, i8*, , , i32) - -define @test_vloxseg2_nxv32i8_nxv4i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; 
CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv4i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv4i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv1i16(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv1i16(,, i8*, , , i32) - -define @test_vloxseg2_nxv32i8_nxv1i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv1i16( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i8(i8*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8(,, i8*, , , i32) @@ -38098,262 +5765,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv32i8_nxv32i8(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv32i8_nxv32i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv2i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv2i8(,, i8*, , , i32) - -define @test_vloxseg2_nxv32i8_nxv2i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv2i8(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8( %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv32i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv2i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv16i32(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv16i32(,, i8*, , , i32) - -define @test_vloxseg2_nxv32i8_nxv16i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv16i32( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv2i16(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv2i16(,, i8*, , , i32) - -define @test_vloxseg2_nxv32i8_nxv2i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv2i16( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret 
%3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv32i8.nxv4i32(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv4i32(,, i8*, , , i32) - -define @test_vloxseg2_nxv32i8_nxv4i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv32i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv32i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv4i32( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv16i16(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv16i16(,, i8*, , , i32) - -define @test_vloxseg2_nxv2i8_nxv16i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i8_nxv16i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv16i16( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv1i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv1i8(,, i8*, , , i32) - -define @test_vloxseg2_nxv2i8_nxv1i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i8_nxv1i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv1i8(i8* %base, %index, i32 
%vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv1i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv16i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv16i8(,, i8*, , , i32) - -define @test_vloxseg2_nxv2i8_nxv16i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv16i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i32(i8*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32(,, i8*, , , i32) @@ -38370,364 +5795,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv4i16(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv4i16(,, i8*, , , i32) - -define @test_vloxseg2_nxv2i8_nxv4i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv4i16(i8* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32( %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv2i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; 
CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv4i16( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv32i16(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv32i16(,, i8*, , , i32) - -define @test_vloxseg2_nxv2i8_nxv32i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i8_nxv32i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv32i16( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv1i32(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv1i32(,, i8*, , , i32) - -define @test_vloxseg2_nxv2i8_nxv1i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i8_nxv1i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv1i32( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv8i16(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv8i16(,, i8*, , , i32) - -define @test_vloxseg2_nxv2i8_nxv8i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i8_nxv8i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: 
vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv8i16( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv8i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv8i8(,, i8*, , , i32) - -define @test_vloxseg2_nxv2i8_nxv8i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i8_nxv8i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv8i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv8i32(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv8i32(,, i8*, , , i32) - -define @test_vloxseg2_nxv2i8_nxv8i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i8_nxv8i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv8i32( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv64i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv64i8(,, i8*, , , i32) - -define @test_vloxseg2_nxv2i8_nxv64i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i8_nxv64i8(i8* %base, %index, i32 
%vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv64i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv4i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv4i8(,, i8*, , , i32) - -define @test_vloxseg2_nxv2i8_nxv4i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i8_nxv4i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv4i8( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv1i16(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv1i16(,, i8*, , , i32) - -define @test_vloxseg2_nxv2i8_nxv1i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv1i16( %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i8.nxv32i8(i8*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv32i8(,, i8*, , , i32) - -define @test_vloxseg2_nxv2i8_nxv32i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv2i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
- %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
- ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vloxseg2_mask_nxv2i8_nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.nxv2i8.nxv32i8(i8* %base, <vscale x 32 x i8> %index, i32 %vl)
- %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
- %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv32i8(<vscale x 2 x i8> %1, <vscale x 2 x i8> %1, i8* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
- %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
- ret <vscale x 2 x i8> %3
-}
-
declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.nxv2i8.nxv2i8(i8*, <vscale x 2 x i8>, i32)
declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
@@ -38744,58 +5825,20 @@ entry:
ret <vscale x 2 x i8> %1
}

-define <vscale x 2 x i8> @test_vloxseg2_mask_nxv2i8_nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i8> @test_vloxseg2_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
- %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.nxv2i8.nxv2i8(i8* %base, <vscale x 2 x i8> %index, i32 %vl)
- %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
- %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %1, <vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
- %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
- ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.nxv2i8.nxv16i32(i8*, <vscale x 16 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv16i32(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vloxseg2_nxv2i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.nxv2i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val, <vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl)
%1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
ret <vscale x 2 x i8> %1
}

-define <vscale x 2 x i8> @test_vloxseg2_mask_nxv2i8_nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.nxv2i8.nxv16i32(i8* %base, <vscale x 16 x i32> %index, i32 %vl)
- %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
- %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv16i32(<vscale x 2 x i8> %1, <vscale x 2 x i8> %1, i8* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
- %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
- ret <vscale x 2 x i8> %3
-}
-
declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.nxv2i8.nxv2i16(i8*, <vscale x 2 x i16>, i32)
declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
@@ -38812,163 +5855,20 @@ entry:
ret <vscale x 2 x i8> %1
}

-define <vscale x 2 x i8> @test_vloxseg2_mask_nxv2i8_nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
+define <vscale x 2 x i8> @test_vloxseg2_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
- %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.nxv2i8.nxv2i16(i8* %base, <vscale x 2 x i16> %index, i32 %vl)
- %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
- %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %1, <vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
- %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
- ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.nxv2i8.nxv4i32(i8*, <vscale x 4 x i32>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv4i32(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vloxseg2_nxv2i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.nxv2i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val, <vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
%1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
ret <vscale x 2 x i8> %1
}

-define <vscale x 2 x i8> @test_vloxseg2_mask_nxv2i8_nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv2i8_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.nxv2i8.nxv4i32(i8* %base, <vscale x 4 x i32> %index, i32 %vl)
- %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
- %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv4i32(<vscale x 2 x i8> %1, <vscale x 2 x i8> %1, i8* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
- %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
- ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg3.nxv2i8.nxv16i16(i8*, <vscale x 16 x i16>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv16i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vloxseg3_nxv2i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg3.nxv2i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
- %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
- ret <vscale x 2 x i8> %1
-}
-
-define <vscale x 2 x i8> @test_vloxseg3_mask_nxv2i8_nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg3.nxv2i8.nxv16i16(i8* %base, <vscale x 16 x i16> %index, i32 %vl)
- %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
- %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv16i16(<vscale x 2 x i8> %1, <vscale x 2 x i8> %1, <vscale x 2 x i8> %1, i8* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
- %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
- ret <vscale x 2 x i8> %3
-}
-
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg3.nxv2i8.nxv1i8(i8*, <vscale x 1 x i8>, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv1i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 1 x i8>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i8> @test_vloxseg3_nxv2i8_nxv1i8(i8* 
%base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i8_nxv1i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv1i8( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv16i8(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv16i8(,,, i8*, , , i32) - -define @test_vloxseg3_nxv2i8_nxv16i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv16i8( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i32(i8*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32(,,, i8*, , , i32) @@ -38985,375 +5885,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv4i16(i8*, , i32) -declare {,,} 
@llvm.riscv.vloxseg3.mask.nxv2i8.nxv4i16(,,, i8*, , , i32) - -define @test_vloxseg3_nxv2i8_nxv4i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv4i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv4i16( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv32i16(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv32i16(,,, i8*, , , i32) - -define @test_vloxseg3_nxv2i8_nxv32i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i8_nxv32i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv32i16( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv1i32(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv1i32(,,, i8*, , , i32) - -define @test_vloxseg3_nxv2i8_nxv1i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i8_nxv1i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), 
v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv1i32( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv8i16(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv8i16(,,, i8*, , , i32) - -define @test_vloxseg3_nxv2i8_nxv8i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i8_nxv8i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv8i16( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv8i8(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv8i8(,,, i8*, , , i32) - -define @test_vloxseg3_nxv2i8_nxv8i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i8_nxv8i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv8i8( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv8i32(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv8i32(,,, i8*, , , i32) - -define @test_vloxseg3_nxv2i8_nxv8i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i8_nxv8i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg3_mask_nxv2i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv8i32( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv64i8(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv64i8(,,, i8*, , , i32) - -define @test_vloxseg3_nxv2i8_nxv64i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i8_nxv64i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv64i8( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv4i8(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv4i8(,,, i8*, , , i32) - -define @test_vloxseg3_nxv2i8_nxv4i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i8_nxv4i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv4i8( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv1i16(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv1i16(,,, i8*, , , i32) - -define @test_vloxseg3_nxv2i8_nxv1i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; 
CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv1i16( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv32i8(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv32i8(,,, i8*, , , i32) - -define @test_vloxseg3_nxv2i8_nxv32i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i8_nxv32i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv32i8( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i8(i8*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8(,,, i8*, , , i32) @@ -39370,60 +5917,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv16i32(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv16i32(,,, i8*, , , i32) - -define @test_vloxseg3_nxv2i8_nxv16i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, 
a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv16i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv16i32( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i16(i8*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16(,,, i8*, , , i32) @@ -39440,168 +5949,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv4i32(i8*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv4i32(,,, i8*, , , i32) - -define @test_vloxseg3_nxv2i8_nxv4i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv4i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv4i32( %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare 
{,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv16i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv16i16(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv2i8_nxv16i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i8_nxv16i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv16i16( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv1i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv1i8(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv2i8_nxv1i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i8_nxv1i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv1i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv16i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv16i8(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv2i8_nxv16i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: 
vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv16i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i32(i8*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32(,,,, i8*, , , i32) @@ -39618,386 +5981,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv4i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv4i16(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv2i8_nxv4i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv4i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv4i16( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv32i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv32i16(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv2i8_nxv32i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define 
@test_vloxseg4_mask_nxv2i8_nxv32i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv32i16( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv1i32(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv1i32(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv2i8_nxv1i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i8_nxv1i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv1i32( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv8i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv8i16(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv2i8_nxv8i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i8_nxv8i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv8i16( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv8i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv8i8(,,,, i8*, , , i32) - 
-define @test_vloxseg4_nxv2i8_nxv8i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i8_nxv8i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv8i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv8i32(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv8i32(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv2i8_nxv8i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i8_nxv8i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv8i32( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv64i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv64i8(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv2i8_nxv64i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i8_nxv64i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = 
tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv64i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv4i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv4i8(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv2i8_nxv4i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i8_nxv4i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv4i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv1i16(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv1i16(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv2i8_nxv1i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv1i16( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv32i8(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv32i8(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv2i8_nxv32i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i8_nxv32i8(i8* %base, %index, i32 %vl, %mask) { -; 
CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv32i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i8(i8*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8(,,,, i8*, , , i32) @@ -40014,62 +6014,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv16i32(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv16i32(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv2i8_nxv16i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv16i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv16i32( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i16(i8*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16(,,,, i8*, , , i32) @@ -40086,173 +6047,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 
%vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv4i32(i8*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv4i32(,,,, i8*, , , i32) - -define @test_vloxseg4_nxv2i8_nxv4i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv4i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv4i32( %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv16i16(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv16i16(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv2i8_nxv16i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i8_nxv16i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv16i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = 
extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv1i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv1i8(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv2i8_nxv1i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i8_nxv1i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv1i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv16i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv16i8(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv2i8_nxv16i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv16i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i32(i8*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32(,,,,, i8*, , , i32) @@ -40269,397 +6080,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg5_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei32.v v1, (a0), 
v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv4i16(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv4i16(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv2i8_nxv4i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv4i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv2i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv4i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv32i16(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv32i16(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv2i8_nxv32i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i8_nxv32i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv32i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv1i32(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv1i32(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv2i8_nxv1i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; 
CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i8_nxv1i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv1i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv8i16(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv8i16(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv2i8_nxv8i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i8_nxv8i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv8i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv8i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv8i8(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv2i8_nxv8i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i8_nxv8i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv8i8(i8* %base, %index, 
i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv8i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv8i32(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv8i32(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv2i8_nxv8i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i8_nxv8i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv8i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv64i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv64i8(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv2i8_nxv64i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i8_nxv64i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv64i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv4i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv4i8(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv2i8_nxv4i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i8_nxv4i8(i8* %base, 
%index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv4i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv1i16(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv1i16(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv2i8_nxv1i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv1i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv32i8(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv32i8(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv2i8_nxv32i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2i8_nxv32i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv32i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i8(i8*, , i32) declare 
{,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8(,,,,, i8*, , , i32) @@ -40676,64 +6114,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg5_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv16i32(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv16i32(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv2i8_nxv16i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv16i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv2i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv16i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i16(i8*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16(,,,,, i8*, , , i32) @@ -40750,178 +6148,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg5_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call 
{,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv4i32(i8*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv4i32(,,,,, i8*, , , i32) - -define @test_vloxseg5_nxv2i8_nxv4i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv4i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv2i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i8.nxv4i32( %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv16i16(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv16i16(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv2i8_nxv16i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i8_nxv16i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv16i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv1i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv1i8(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv2i8_nxv1i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} 
@llvm.riscv.vloxseg6.nxv2i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i8_nxv1i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv1i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv16i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv16i8(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv2i8_nxv16i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv16i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i32(i8*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32(,,,,,, i8*, , , i32) @@ -40938,408 +6182,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare 
{,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv4i16(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv4i16(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv2i8_nxv4i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv4i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv2i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv4i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv32i16(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv32i16(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv2i8_nxv32i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i8_nxv32i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv32i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv1i32(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv1i32(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv2i8_nxv1i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define 
@test_vloxseg6_mask_nxv2i8_nxv1i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv1i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv8i16(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv8i16(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv2i8_nxv8i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i8_nxv8i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv8i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv8i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv8i8(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv2i8_nxv8i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i8_nxv8i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv8i8( %1, %1, %1, 
%1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv8i32(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv8i32(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv2i8_nxv8i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i8_nxv8i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv8i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv64i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv64i8(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv2i8_nxv64i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i8_nxv64i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv64i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv4i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv4i8(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv2i8_nxv4i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i8_nxv4i8(i8* %base, %index, i32 %vl, 
%mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv4i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv1i16(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv1i16(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv2i8_nxv1i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv1i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv32i8(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv32i8(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv2i8_nxv32i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv2i8_nxv32i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv32i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = 
extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i8(i8*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8(,,,,,, i8*, , , i32) @@ -41356,66 +6217,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv16i32(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv16i32(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv2i8_nxv16i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv16i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv2i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv16i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i16(i8*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16(,,,,,, i8*, , , i32) @@ -41432,183 +6252,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t +; 
CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv4i32(i8*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv4i32(,,,,,, i8*, , , i32) - -define @test_vloxseg6_nxv2i8_nxv4i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv4i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv2i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i8.nxv4i32( %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv16i16(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv16i16(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv2i8_nxv16i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i8_nxv16i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv16i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv1i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv1i8(,,,,,,, i8*, , , i32) - 
-define @test_vloxseg7_nxv2i8_nxv1i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i8_nxv1i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv16i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv16i8(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv2i8_nxv16i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv16i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i32(i8*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32(,,,,,,, i8*, , , i32) @@ -41625,11 +6287,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -41637,407 +6298,15 @@ define @test_vloxseg7_mask_nxv2i8_nxv2i32(i8* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue 
{,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv4i16(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv4i16(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv2i8_nxv4i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv4i16(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv2i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv32i16(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv32i16(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv2i8_nxv32i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i8_nxv32i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv32i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv1i32(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv1i32(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv2i8_nxv1i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i8_nxv1i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv8i16(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv8i16(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv2i8_nxv8i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i8_nxv8i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv8i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv8i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv8i8(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv2i8_nxv8i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i8_nxv8i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; 
CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv8i32(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv8i32(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv2i8_nxv8i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i8_nxv8i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv8i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv64i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv64i8(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv2i8_nxv64i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i8_nxv64i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv64i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv4i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv4i8(,,,,,,, i8*, , 
, i32) - -define @test_vloxseg7_nxv2i8_nxv4i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i8_nxv4i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv1i16(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv1i16(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv2i8_nxv1i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv32i8(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv32i8(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv2i8_nxv32i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i8_nxv32i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: 
vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv32i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i8(i8*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8(,,,,,,, i8*, , , i32) @@ -42054,11 +6323,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -42066,56 +6334,15 @@ define @test_vloxseg7_mask_nxv2i8_nxv2i8(i8* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv16i32(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv16i32(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv2i8_nxv16i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv16i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv2i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv16i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i16(i8*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16(,,,,,,, i8*, , , i32) @@ -42132,11 +6359,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) { +define 
@test_vloxseg7_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -42144,176 +6370,15 @@ define @test_vloxseg7_mask_nxv2i8_nxv2i16(i8* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv4i32(i8*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv4i32(,,,,,,, i8*, , , i32) - -define @test_vloxseg7_nxv2i8_nxv4i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv4i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv2i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i8.nxv4i32( %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv16i16(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv16i16(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv2i8_nxv16i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i8_nxv16i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} 
@llvm.riscv.vloxseg8.nxv2i8.nxv16i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv1i8(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv1i8(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv2i8_nxv1i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i8_nxv1i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv1i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv16i8(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv16i8(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv2i8_nxv16i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i8_nxv16i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv16i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i32(i8*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32(,,,,,,,, i8*, , , i32) @@ -42330,171 +6395,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl, 
%mask) { +define @test_vloxseg8_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv4i16(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv4i16(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv2i8_nxv4i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i8_nxv4i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv4i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv32i16(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv32i16(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv2i8_nxv32i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i8_nxv32i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; 
CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv32i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv1i32(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv1i32(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv2i8_nxv1i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i8_nxv1i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv1i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv8i16(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv8i16(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv2i8_nxv8i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i8_nxv8i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -42503,257 +6407,15 @@ define @test_vloxseg8_mask_nxv2i8_nxv8i16(i8* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv8i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv8i8(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv8i8(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv2i8_nxv8i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv8i8(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv2i8_nxv8i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv8i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv8i32(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv8i32(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv2i8_nxv8i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i8_nxv8i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv8i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv64i8(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv64i8(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv2i8_nxv64i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i8_nxv64i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg8_mask_nxv2i8_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv64i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv4i8(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv4i8(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv2i8_nxv4i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i8_nxv4i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv4i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv1i16(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv1i16(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv2i8_nxv1i16(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i8_nxv1i16(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} 
@llvm.riscv.vloxseg8.nxv2i8.nxv1i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv32i8(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv32i8(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv2i8_nxv32i8(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i8_nxv32i8(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv32i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i8(i8*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8(,,,,,,,, i8*, , , i32) @@ -42770,70 +6432,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i8(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv16i32(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv16i32(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv2i8_nxv16i32(i8* %base, %index, i32 %vl) { -; 
CHECK-LABEL: test_vloxseg8_nxv2i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv16i32(i8* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv2i8_nxv16i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv16i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i16(i8*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16(,,,,,,,, i8*, , , i32) @@ -42850,51 +6469,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv2i16(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv4i32(i8*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv4i32(,,,,,,,, i8*, , , i32) - -define @test_vloxseg8_nxv2i8_nxv4i32(i8* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i8_nxv4i32(i8* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v 
v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -42903,117 +6481,13 @@ define @test_vloxseg8_mask_nxv2i8_nxv4i32(i8* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv2i8.nxv4i32(i8* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, i8* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv16i16(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv16i16(,, i16*, , , i32) - -define @test_vloxseg2_nxv2i16_nxv16i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv16i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i16_nxv16i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv16i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv16i16( %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv1i8(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv1i8(,, i16*, , , i32) - -define @test_vloxseg2_nxv2i16_nxv1i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv1i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i16_nxv1i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv1i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv1i8( %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv16i8(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv16i8(,, i16*, , , i32) - -define @test_vloxseg2_nxv2i16_nxv16i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv16i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i16_nxv16i8(i16* %base, %index, i32 
%vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv16i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv16i8( %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i32(i16*, , i32) @@ -43032,364 +6506,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32( %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv4i16(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv4i16(,, i16*, , , i32) - -define @test_vloxseg2_nxv2i16_nxv4i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv4i16(i16* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32( %val, %val, i16* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv2i16_nxv4i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv4i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv4i16( %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv32i16(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv32i16(,, i16*, , , i32) - -define @test_vloxseg2_nxv2i16_nxv32i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; 
CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv32i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i16_nxv32i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv32i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv32i16( %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv1i32(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv1i32(,, i16*, , , i32) - -define @test_vloxseg2_nxv2i16_nxv1i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv1i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i16_nxv1i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv1i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv1i32( %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv8i16(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv8i16(,, i16*, , , i32) - -define @test_vloxseg2_nxv2i16_nxv8i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv8i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i16_nxv8i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv8i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv8i16( %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv8i8(i16*, , i32) -declare {,} 
@llvm.riscv.vloxseg2.mask.nxv2i16.nxv8i8(,, i16*, , , i32) - -define @test_vloxseg2_nxv2i16_nxv8i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv8i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i16_nxv8i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv8i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv8i8( %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv8i32(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv8i32(,, i16*, , , i32) - -define @test_vloxseg2_nxv2i16_nxv8i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv8i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i16_nxv8i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv8i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv8i32( %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv64i8(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv64i8(,, i16*, , , i32) - -define @test_vloxseg2_nxv2i16_nxv64i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv64i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i16_nxv64i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv64i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call 
{,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv64i8( %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv4i8(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv4i8(,, i16*, , , i32) - -define @test_vloxseg2_nxv2i16_nxv4i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv4i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i16_nxv4i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv4i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv4i8( %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv1i16(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv1i16(,, i16*, , , i32) - -define @test_vloxseg2_nxv2i16_nxv1i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv1i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i16_nxv1i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv1i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv1i16( %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv32i8(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv32i8(,, i16*, , , i32) - -define @test_vloxseg2_nxv2i16_nxv32i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv32i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2i16_nxv32i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v 
v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv32i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv32i8( %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i8(i16*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8(,, i16*, , , i32) @@ -43406,58 +6536,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8( %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv16i32(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv16i32(,, i16*, , , i32) - -define @test_vloxseg2_nxv2i16_nxv16i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv16i32(i16* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8( %val, %val, i16* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv2i16_nxv16i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv16i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv16i32( %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i16(i16*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16(,, i16*, , , i32) @@ -43474,163 +6566,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; 
CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16( %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2i16.nxv4i32(i16*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv4i32(,, i16*, , , i32) - -define @test_vloxseg2_nxv2i16_nxv4i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv4i32(i16* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16( %val, %val, i16* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv2i16_nxv4i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i16.nxv4i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv4i32( %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv16i16(i16*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv16i16(,,, i16*, , , i32) - -define @test_vloxseg3_nxv2i16_nxv16i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv16i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i16_nxv16i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv16i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv16i16( %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv1i8(i16*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv1i8(,,, i16*, , , i32) - -define @test_vloxseg3_nxv2i16_nxv1i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv1i8(i16* %base, %index, i32 %vl) - %1 = extractvalue 
{,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i16_nxv1i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv1i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv1i8( %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv16i8(i16*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv16i8(,,, i16*, , , i32) - -define @test_vloxseg3_nxv2i16_nxv16i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv16i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i16_nxv16i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv16i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv16i8( %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i32(i16*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32(,,, i16*, , , i32) @@ -43647,375 +6596,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32( %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv4i16(i16*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv4i16(,,, i16*, , , i32) - -define @test_vloxseg3_nxv2i16_nxv4i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: 
ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv4i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2i16_nxv4i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv4i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv4i16( %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv32i16(i16*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv32i16(,,, i16*, , , i32) - -define @test_vloxseg3_nxv2i16_nxv32i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv32i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i16_nxv32i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv32i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv32i16( %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv1i32(i16*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv1i32(,,, i16*, , , i32) - -define @test_vloxseg3_nxv2i16_nxv1i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv1i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i16_nxv1i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv1i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv1i32( %1, %1, %1, i16* %base, %index, %mask, 
i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv8i16(i16*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv8i16(,,, i16*, , , i32) - -define @test_vloxseg3_nxv2i16_nxv8i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv8i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i16_nxv8i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv8i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv8i16( %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv8i8(i16*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv8i8(,,, i16*, , , i32) - -define @test_vloxseg3_nxv2i16_nxv8i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv8i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i16_nxv8i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv8i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv8i8( %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv8i32(i16*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv8i32(,,, i16*, , , i32) - -define @test_vloxseg3_nxv2i16_nxv8i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv8i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i16_nxv8i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, 
e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv8i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv8i32( %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv64i8(i16*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv64i8(,,, i16*, , , i32) - -define @test_vloxseg3_nxv2i16_nxv64i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv64i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i16_nxv64i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv64i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv64i8( %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv4i8(i16*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv4i8(,,, i16*, , , i32) - -define @test_vloxseg3_nxv2i16_nxv4i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv4i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i16_nxv4i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv4i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv4i8( %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv1i16(i16*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv1i16(,,, i16*, , , i32) - -define @test_vloxseg3_nxv2i16_nxv1i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv1i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - 
-define @test_vloxseg3_mask_nxv2i16_nxv1i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv1i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv1i16( %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv32i8(i16*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv32i8(,,, i16*, , , i32) - -define @test_vloxseg3_nxv2i16_nxv32i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv32i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2i16_nxv32i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv32i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv32i8( %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i8(i16*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8(,,, i16*, , , i32) @@ -44032,60 +6628,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8( %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv16i32(i16*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv16i32(,,, i16*, , , i32) - -define @test_vloxseg3_nxv2i16_nxv16i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call 
{,,} @llvm.riscv.vloxseg3.nxv2i16.nxv16i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2i16_nxv16i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv16i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv16i32( %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i16(i16*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16(,,, i16*, , , i32) @@ -44102,168 +6660,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16( %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv4i32(i16*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv4i32(,,, i16*, , , i32) - -define @test_vloxseg3_nxv2i16_nxv4i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv4i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2i16_nxv4i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i16.nxv4i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv4i32( %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv16i16(i16*, , i32) -declare {,,,} 
@llvm.riscv.vloxseg4.mask.nxv2i16.nxv16i16(,,,, i16*, , , i32) - -define @test_vloxseg4_nxv2i16_nxv16i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv16i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i16_nxv16i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv16i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv16i16( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv1i8(i16*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv1i8(,,,, i16*, , , i32) - -define @test_vloxseg4_nxv2i16_nxv1i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv1i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i16_nxv1i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv1i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv1i8( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv16i8(i16*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv16i8(,,,, i16*, , , i32) - -define @test_vloxseg4_nxv2i16_nxv16i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv16i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i16_nxv16i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, 
e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv16i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv16i8( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i32(i16*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32(,,,, i16*, , , i32) @@ -44280,386 +6692,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv4i16(i16*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv4i16(,,,, i16*, , , i32) - -define @test_vloxseg4_nxv2i16_nxv4i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv4i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2i16_nxv4i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv4i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv4i16( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv32i16(i16*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv32i16(,,,, i16*, , , i32) - -define @test_vloxseg4_nxv2i16_nxv32i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv32i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 
-} - -define @test_vloxseg4_mask_nxv2i16_nxv32i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv32i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv32i16( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv1i32(i16*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv1i32(,,,, i16*, , , i32) - -define @test_vloxseg4_nxv2i16_nxv1i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv1i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i16_nxv1i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv1i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv1i32( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv8i16(i16*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv8i16(,,,, i16*, , , i32) - -define @test_vloxseg4_nxv2i16_nxv8i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv8i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2i16_nxv8i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv8i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv8i16( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv8i8(i16*, , i32) -declare {,,,} 
@llvm.riscv.vloxseg4.mask.nxv2i16.nxv8i8(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv2i16_nxv8i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv8i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv2i16_nxv8i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv8i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv8i8( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv8i32(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv8i32(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv2i16_nxv8i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv8i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv2i16_nxv8i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv8i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv8i32( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv64i8(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv64i8(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv2i16_nxv64i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv64i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv2i16_nxv64i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-;
CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv64i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv64i8( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv4i8(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv4i8(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv2i16_nxv4i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv4i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv2i16_nxv4i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv4i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv4i8( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv1i16(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv1i16(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv2i16_nxv1i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv1i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv2i16_nxv1i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv1i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv1i16( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv32i8(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv32i8(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv2i16_nxv32i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,}
@llvm.riscv.vloxseg4.nxv2i16.nxv32i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv2i16_nxv32i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv32i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv32i8( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i8(i16*, , i32)
declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8(,,,, i16*, , , i32)
@@ -44676,62 +6725,23 @@ entry:
ret %1
}
-define @test_vloxseg4_mask_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg4_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
; CHECK-NEXT: vmv1r.v v2, v1
; CHECK-NEXT: vmv1r.v v3, v1
; CHECK-NEXT: vmv1r.v v4, v1
; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v2
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv16i32(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv16i32(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv2i16_nxv16i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv16i32(i16* %base, %index, i32 %vl)
+ %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,} %0, 1
 ret %1
}
-define @test_vloxseg4_mask_nxv2i16_nxv16i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv16i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv16i32( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i16(i16*, , i32)
declare {,,,}
@llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16(,,,, i16*, , , i32)
@@ -44748,173 +6758,23 @@ entry:
ret %1
}
-define @test_vloxseg4_mask_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg4_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
; CHECK-NEXT: vmv1r.v v2, v1
; CHECK-NEXT: vmv1r.v v3, v1
; CHECK-NEXT: vmv1r.v v4, v1
; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v2
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv4i32(i16*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv4i32(,,,, i16*, , , i32)
-
-define @test_vloxseg4_nxv2i16_nxv4i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv4i32(i16* %base, %index, i32 %vl)
+ %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,} %0, 1
 ret %1
}
-define @test_vloxseg4_mask_nxv2i16_nxv4i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i16.nxv4i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv4i32( %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv16i16(i16*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv16i16(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv2i16_nxv16i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv16i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2i16_nxv16i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
-;
CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv16i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv16i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv1i8(i16*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv1i8(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv2i16_nxv1i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv1i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2i16_nxv1i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv1i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv1i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv16i8(i16*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv16i8(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv2i16_nxv16i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv16i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2i16_nxv16i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv16i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv16i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i32(i16*, , i32)
declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32(,,,,, i16*, , , i32)
@@ -44931,397 +6791,24 @@ entry:
ret %1
}
-define @test_vloxseg5_mask_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg5_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-;
CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
; CHECK-NEXT: vmv1r.v v2, v1
; CHECK-NEXT: vmv1r.v v3, v1
; CHECK-NEXT: vmv1r.v v4, v1
; CHECK-NEXT: vmv1r.v v5, v1
; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v2
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv4i16(i16*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv4i16(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv2i16_nxv4i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv4i16(i16* %base, %index, i32 %vl)
+ %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,,} %0, 1
 ret %1
}
-define @test_vloxseg5_mask_nxv2i16_nxv4i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv4i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv4i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv32i16(i16*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv32i16(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv2i16_nxv32i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv32i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2i16_nxv32i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv32i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv32i16( %1, %1, %1, %1, %1, i16*
%base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv1i32(i16*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv1i32(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv2i16_nxv1i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv1i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2i16_nxv1i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv1i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv1i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv8i16(i16*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv8i16(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv2i16_nxv8i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv8i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2i16_nxv8i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv8i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv8i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv8i8(i16*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv8i8(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv2i16_nxv8i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv8i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2i16_nxv8i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv8i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv8i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv8i32(i16*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv8i32(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv2i16_nxv8i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv8i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2i16_nxv8i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv8i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv8i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv64i8(i16*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv64i8(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv2i16_nxv64i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv64i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2i16_nxv64i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv64i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv64i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv4i8(i16*, , i32)
-declare {,,,,}
@llvm.riscv.vloxseg5.mask.nxv2i16.nxv4i8(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv2i16_nxv4i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv4i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2i16_nxv4i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv4i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv4i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv1i16(i16*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv1i16(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv2i16_nxv1i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv1i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2i16_nxv1i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv1i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv1i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv32i8(i16*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv32i8(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv2i16_nxv32i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv32i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2i16_nxv32i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT:
vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv32i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv32i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i8(i16*, , i32)
declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8(,,,,, i16*, , , i32)
@@ -45338,64 +6825,24 @@ entry:
ret %1
}
-define @test_vloxseg5_mask_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg5_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
; CHECK-NEXT: vmv1r.v v2, v1
; CHECK-NEXT: vmv1r.v v3, v1
; CHECK-NEXT: vmv1r.v v4, v1
; CHECK-NEXT: vmv1r.v v5, v1
; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v2
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv16i32(i16*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv16i32(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv2i16_nxv16i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv16i32(i16* %base, %index, i32 %vl)
+ %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,,} %0, 1
 ret %1
}
-define @test_vloxseg5_mask_nxv2i16_nxv16i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv16i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv16i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i16(i16*, , i32)
declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16(,,,,, i16*, , , i32)
@@ -45412,178 +6859,24 @@ entry:
ret %1
}
-define @test_vloxseg5_mask_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg5_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl, %mask) {
;
CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
; CHECK-NEXT: vmv1r.v v2, v1
; CHECK-NEXT: vmv1r.v v3, v1
; CHECK-NEXT: vmv1r.v v4, v1
; CHECK-NEXT: vmv1r.v v5, v1
; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v2
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv4i32(i16*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv4i32(,,,,, i16*, , , i32)
-
-define @test_vloxseg5_nxv2i16_nxv4i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv4i32(i16* %base, %index, i32 %vl)
+ %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,,} %0, 1
 ret %1
}
-define @test_vloxseg5_mask_nxv2i16_nxv4i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i16.nxv4i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2i16.nxv4i32( %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv16i16(i16*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv16i16(,,,,,, i16*, , , i32)
-
-define @test_vloxseg6_nxv2i16_nxv16i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv16i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2i16_nxv16i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv16i16(i16* %base, %index, i32 %vl)
- %1 =
extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv16i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv1i8(i16*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv1i8(,,,,,, i16*, , , i32)
-
-define @test_vloxseg6_nxv2i16_nxv1i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv1i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2i16_nxv1i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv1i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv1i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv16i8(i16*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv16i8(,,,,,, i16*, , , i32)
-
-define @test_vloxseg6_nxv2i16_nxv16i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv16i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2i16_nxv16i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv16i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv16i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i32(i16*, , i32)
declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32(,,,,,, i16*, , , i32)
@@ -45600,408 +6893,25 @@ entry:
ret %1
}
-define @test_vloxseg6_mask_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg6_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT:
vloxseg6ei32.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
; CHECK-NEXT: vmv1r.v v2, v1
; CHECK-NEXT: vmv1r.v v3, v1
; CHECK-NEXT: vmv1r.v v4, v1
; CHECK-NEXT: vmv1r.v v5, v1
; CHECK-NEXT: vmv1r.v v6, v1
; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v2
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv4i16(i16*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv4i16(,,,,,, i16*, , , i32)
-
-define @test_vloxseg6_nxv2i16_nxv4i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv4i16(i16* %base, %index, i32 %vl)
+ %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,,,} %0, 1
 ret %1
}
-define @test_vloxseg6_mask_nxv2i16_nxv4i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv4i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv4i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv32i16(i16*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv32i16(,,,,,, i16*, , , i32)
-
-define @test_vloxseg6_nxv2i16_nxv32i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv32i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2i16_nxv32i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv32i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,}
@llvm.riscv.vloxseg6.mask.nxv2i16.nxv32i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv1i32(i16*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv1i32(,,,,,, i16*, , , i32)
-
-define @test_vloxseg6_nxv2i16_nxv1i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv1i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2i16_nxv1i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv1i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv1i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv8i16(i16*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv8i16(,,,,,, i16*, , , i32)
-
-define @test_vloxseg6_nxv2i16_nxv8i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv8i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2i16_nxv8i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv8i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv8i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv8i8(i16*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv8i8(,,,,,, i16*, , , i32)
-
-define @test_vloxseg6_nxv2i16_nxv8i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv8i8(i16* %base, %index, i32 %vl)
- %1 =
extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2i16_nxv8i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv8i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv8i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv8i32(i16*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv8i32(,,,,,, i16*, , , i32)
-
-define @test_vloxseg6_nxv2i16_nxv8i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv8i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2i16_nxv8i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv8i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv8i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv64i8(i16*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv64i8(,,,,,, i16*, , , i32)
-
-define @test_vloxseg6_nxv2i16_nxv64i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv64i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2i16_nxv64i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv64i8(i16* %base, %index, i32 %vl)
- %1 =
extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv64i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv4i8(i16*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv4i8(,,,,,, i16*, , , i32)
-
-define @test_vloxseg6_nxv2i16_nxv4i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv4i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2i16_nxv4i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv4i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv4i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv1i16(i16*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv1i16(,,,,,, i16*, , , i32)
-
-define @test_vloxseg6_nxv2i16_nxv1i16(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv1i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2i16_nxv1i16(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv1i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv1i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv32i8(i16*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv32i8(,,,,,, i16*, , , i32)
-
-define @test_vloxseg6_nxv2i16_nxv32i8(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,}
@llvm.riscv.vloxseg6.nxv2i16.nxv32i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2i16_nxv32i8(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv32i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv32i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i8(i16*, , i32)
declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8(,,,,,, i16*, , , i32)
@@ -46018,66 +6928,25 @@ entry:
ret %1
}
-define @test_vloxseg6_mask_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg6_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
; CHECK-NEXT: vmv1r.v v2, v1
; CHECK-NEXT: vmv1r.v v3, v1
; CHECK-NEXT: vmv1r.v v4, v1
; CHECK-NEXT: vmv1r.v v5, v1
; CHECK-NEXT: vmv1r.v v6, v1
; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v2
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv16i32(i16*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv16i32(,,,,,, i16*, , , i32)
-
-define @test_vloxseg6_nxv2i16_nxv16i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv16i32(i16* %base, %index, i32 %vl)
+ %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,,,} %0, 1
 ret %1
}
-define @test_vloxseg6_mask_nxv2i16_nxv16i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv16i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 =
tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv16i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i16(i16*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16(,,,,,, i16*, , , i32) @@ -46094,183 +6963,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv4i32(i16*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv4i32(,,,,,, i16*, , , i32) - -define @test_vloxseg6_nxv2i16_nxv4i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv4i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv2i16_nxv4i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i16.nxv4i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2i16.nxv4i32( %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv16i16(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv16i16(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv2i16_nxv16i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv16i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define 
@test_vloxseg7_mask_nxv2i16_nxv16i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv16i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv1i8(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv1i8(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv2i16_nxv1i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv1i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i16_nxv1i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv1i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv16i8(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv16i8(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv2i16_nxv16i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv16i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i16_nxv16i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.nxv2i16.nxv16i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i32(i16*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32(,,,,,,, i16*, , , i32) @@ -46287,11 +6998,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -46299,407 +7009,15 @@ define @test_vloxseg7_mask_nxv2i16_nxv2i32(i16* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv4i16(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv4i16(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv2i16_nxv4i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv4i16(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv2i16_nxv4i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv4i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv32i16(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv32i16(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv2i16_nxv32i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv32i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i16_nxv32i16(i16* 
%base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv32i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv1i32(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv1i32(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv2i16_nxv1i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv1i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i16_nxv1i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv1i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv8i16(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv8i16(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv2i16_nxv8i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv8i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i16_nxv8i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv8i16(i16* 
%base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv8i8(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv8i8(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv2i16_nxv8i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv8i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i16_nxv8i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv8i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv8i32(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv8i32(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv2i16_nxv8i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv8i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i16_nxv8i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv8i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv64i8(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv64i8(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv2i16_nxv64i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, 
(a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv64i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i16_nxv64i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv64i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv4i8(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv4i8(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv2i16_nxv4i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv4i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i16_nxv4i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv4i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv1i16(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv1i16(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv2i16_nxv1i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv1i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i16_nxv1i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; 
CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv1i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv32i8(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv32i8(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv2i16_nxv32i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv32i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2i16_nxv32i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv32i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i8(i16*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8(,,,,,,, i16*, , , i32) @@ -46716,11 +7034,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -46728,56 +7045,15 @@ define @test_vloxseg7_mask_nxv2i16_nxv2i8(i16* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv16i32(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv16i32(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv2i16_nxv16i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv16i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv2i16_nxv16i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv16i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i16(i16*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16(,,,,,,, i16*, , , i32) @@ -46794,11 +7070,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -46806,176 +7081,15 @@ define @test_vloxseg7_mask_nxv2i16_nxv2i16(i16* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv4i32(i16*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv4i32(,,,,,,, i16*, , , i32) - -define @test_vloxseg7_nxv2i16_nxv4i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv4i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv2i16_nxv4i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i16.nxv4i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv2i16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv16i16(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv16i16(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv2i16_nxv16i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv16i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i16_nxv16i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv16i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv1i8(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv1i8(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv2i16_nxv1i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv1i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i16_nxv1i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv1i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv16i8(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv16i8(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv2i16_nxv16i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, 
e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv16i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i16_nxv16i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv16i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i32(i16*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32(,,,,,,,, i16*, , , i32) @@ -46992,171 +7106,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv4i16(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv4i16(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv2i16_nxv4i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv4i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i16_nxv4i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, 
(a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv4i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv32i16(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv32i16(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv2i16_nxv32i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv32i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i16_nxv32i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv32i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv1i32(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv1i32(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv2i16_nxv1i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv1i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i16_nxv1i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv1i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv8i16(i16*, , i32) -declare {,,,,,,,} 
@llvm.riscv.vloxseg8.mask.nxv2i16.nxv8i16(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv2i16_nxv8i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv8i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i16_nxv8i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -47165,257 +7118,15 @@ define @test_vloxseg8_mask_nxv2i16_nxv8i16(i16* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv8i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv8i8(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv8i8(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv2i16_nxv8i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv8i8(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv2i16_nxv8i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv8i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv8i32(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv8i32(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv2i16_nxv8i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv8i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i16_nxv8i32(i16* 
%base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv8i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv64i8(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv64i8(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv2i16_nxv64i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv64i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i16_nxv64i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv64i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv4i8(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv4i8(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv2i16_nxv4i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv4i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i16_nxv4i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, 
(a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv4i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv1i16(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv1i16(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv2i16_nxv1i16(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv1i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i16_nxv1i16(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv1i16(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv32i8(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv32i8(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv2i16_nxv32i8(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv32i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2i16_nxv32i8(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv32i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i8(i16*, , i32) declare {,,,,,,,} 
@llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8(,,,,,,,, i16*, , , i32) @@ -47432,70 +7143,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i8(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv16i32(i16*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv16i32(,,,,,,,, i16*, , , i32) - -define @test_vloxseg8_nxv2i16_nxv16i32(i16* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv16i32(i16* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv2i16_nxv16i32(i16* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv16i32(i16* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i16(i16*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16(,,,,,,,, i16*, , , i32) @@ -47512,51 +7180,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: 
test_vloxseg8_mask_nxv2i16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v11, v9
-; CHECK-NEXT: vmv1r.v v12, v9
-; CHECK-NEXT: vmv1r.v v13, v9
-; CHECK-NEXT: vmv1r.v v14, v9
-; CHECK-NEXT: vmv1r.v v15, v9
-; CHECK-NEXT: vmv1r.v v16, v9
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv2i16(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv4i32(i16*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv4i32(,,,,,,,, i16*, , , i32)
-
-define @test_vloxseg8_nxv2i16_nxv4i32(i16* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv4i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv2i16_nxv4i32(i16* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v10, v8
 ; CHECK-NEXT: vmv1r.v v11, v10
 ; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vmv1r.v v13, v10
@@ -47565,151 +7192,13 @@ define @test_vloxseg8_mask_nxv2i16_nxv4i32(i16* %base,
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i16.nxv4i32(i16* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, i16* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv16i16(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv16i16(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv4i32_nxv16i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv16i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4i32_nxv16i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv16i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv16i16( %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv1i8(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv1i8(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv4i32_nxv1i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv1i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4i32_nxv1i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv1i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv1i8( %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv16i8(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv16i8(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv4i32_nxv16i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv16i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4i32_nxv16i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv16i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv16i8( %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv2i32(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv2i32(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv4i32_nxv2i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv2i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4i32_nxv2i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv2i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv2i32( %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
+ %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i32 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 1
+ ret %1
 }
 
 declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i16(i32*, , i32)
@@ -47728,228 +7217,20 @@ entry:
 ret %1
 }
 
-define @test_vloxseg2_mask_nxv4i32_nxv4i16(i32* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg2_mask_nxv4i32_nxv4i16( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v8
 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16( %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv32i16(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv32i16(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv4i32_nxv32i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv32i16(i32* %base, %index, i32 %vl)
+ %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16( %val, %val, i32* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,} %0, 1
 ret %1
 }
 
-define @test_vloxseg2_mask_nxv4i32_nxv32i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv32i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv32i16( %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv1i32(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv1i32(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv4i32_nxv1i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv1i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4i32_nxv1i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv1i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv1i32( %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv8i16(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv8i16(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv4i32_nxv8i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv8i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4i32_nxv8i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv8i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv8i16( %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv8i8(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv8i8(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv4i32_nxv8i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv8i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4i32_nxv8i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv8i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv8i8( %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv8i32(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv8i32(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv4i32_nxv8i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv8i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4i32_nxv8i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv8i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv8i32( %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv64i8(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv64i8(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv4i32_nxv64i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv64i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4i32_nxv64i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv64i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv64i8( %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
 declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i8(i32*, , i32)
 declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8(,, i32*, , , i32)
 
@@ -47966,194 +7247,20 @@ entry:
 ret %1
 }
 
-define @test_vloxseg2_mask_nxv4i32_nxv4i8(i32* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg2_mask_nxv4i32_nxv4i8( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v8
 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8( %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv1i16(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv1i16(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv4i32_nxv1i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv1i16(i32* %base, %index, i32 %vl)
+ %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8( %val, %val, i32* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,} %0, 1
 ret %1
 }
 
-define @test_vloxseg2_mask_nxv4i32_nxv1i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv1i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv1i16( %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv32i8(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv32i8(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv4i32_nxv32i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv32i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4i32_nxv32i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv32i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv32i8( %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv2i8(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv2i8(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv4i32_nxv2i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv2i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4i32_nxv2i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv2i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv2i8( %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv16i32(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv16i32(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv4i32_nxv16i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv16i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4i32_nxv16i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv16i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv16i32( %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv2i16(i32*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv2i16(,, i32*, , , i32)
-
-define @test_vloxseg2_nxv4i32_nxv2i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv2i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4i32_nxv2i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv2i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv2i16( %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
 declare {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i32(i32*, , i32)
 declare {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32(,, i32*, , , i32)
 
@@ -48170,164 +7277,20 @@ entry:
 ret %1
 }
 
-define @test_vloxseg2_mask_nxv4i32_nxv4i32(i32* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg2_mask_nxv4i32_nxv4i32( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv4i32_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v8
 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32( %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv16i16(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv16i16(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv4i32_nxv16i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv16i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
+ %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32( %val, %val, i32* %base, %index, %mask, i32 %vl)
+ %1 = extractvalue {,} %0, 1
 ret %1
 }
 
-define @test_vloxseg3_mask_nxv4i32_nxv16i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv16i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv16i16( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv1i8(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv1i8(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv4i32_nxv1i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv1i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4i32_nxv1i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv1i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv1i8( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv16i8(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv16i8(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv4i32_nxv16i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv16i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4i32_nxv16i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv16i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv16i8( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv2i32(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv2i32(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv4i32_nxv2i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv2i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4i32_nxv2i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv2i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv2i32( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
 declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i16(i32*, , i32)
 declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16(,,, i32*, , , i32)
 
@@ -48344,235 +7307,22 @@ entry:
 ret %1
 }
 
-define @test_vloxseg3_mask_nxv4i32_nxv4i16(i32* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg3_mask_nxv4i32_nxv4i16( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8
+; CHECK-NEXT: vmv2r.v v2, v8
 ; CHECK-NEXT: vmv2r.v v4, v2
 ; CHECK-NEXT: vmv2r.v v6, v2
 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t
 ; CHECK-NEXT: vmv2r.v v8, v4
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv32i16(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv32i16(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv4i32_nxv32i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv32i16(i32* %base, %index, i32 %vl)
+ %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,} %0, 1
 ret %1
 }
 
-define @test_vloxseg3_mask_nxv4i32_nxv32i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv32i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv32i16( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv1i32(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv1i32(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv4i32_nxv1i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv1i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4i32_nxv1i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv1i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv1i32( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv8i16(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv8i16(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv4i32_nxv8i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv8i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4i32_nxv8i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv8i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv8i16( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv8i8(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv8i8(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv4i32_nxv8i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv8i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4i32_nxv8i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv8i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv8i8( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv8i32(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv8i32(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv4i32_nxv8i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv8i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4i32_nxv8i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv8i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv8i32( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv64i8(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv64i8(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv4i32_nxv64i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv64i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4i32_nxv64i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv64i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv64i8( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
 declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i8(i32*, , i32)
 declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8(,,, i32*, , , i32)
 
@@ -48589,200 +7339,22 @@ entry:
 ret %1
 }
 
-define @test_vloxseg3_mask_nxv4i32_nxv4i8(i32* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg3_mask_nxv4i32_nxv4i8( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8
+; CHECK-NEXT: vmv2r.v v2, v8
 ; CHECK-NEXT: vmv2r.v v4, v2
 ; CHECK-NEXT: vmv2r.v v6, v2
 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t
 ; CHECK-NEXT: vmv2r.v v8, v4
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv1i16(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv1i16(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv4i32_nxv1i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv1i16(i32* %base, %index, i32 %vl)
+ %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,} %0, 1
 ret %1
 }
 
-define @test_vloxseg3_mask_nxv4i32_nxv1i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv1i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv1i16( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv32i8(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv32i8(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv4i32_nxv32i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv32i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4i32_nxv32i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv32i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv32i8( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv2i8(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv2i8(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv4i32_nxv2i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv2i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4i32_nxv2i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv2i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv2i8( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv16i32(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv16i32(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv4i32_nxv16i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv16i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4i32_nxv16i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv16i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv16i32( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv2i16(i32*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv2i16(,,, i32*, , , i32)
-
-define @test_vloxseg3_nxv4i32_nxv2i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv2i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4i32_nxv2i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv2i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv2i16( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
 declare {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i32(i32*, , i32)
 declare {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32(,,, i32*, , , i32)
 
@@ -48799,169 +7371,22 @@ entry:
 ret %1
 }
 
-define @test_vloxseg3_mask_nxv4i32_nxv4i32(i32* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg3_mask_nxv4i32_nxv4i32( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8
+; CHECK-NEXT: vmv2r.v v2, v8
 ; CHECK-NEXT: vmv2r.v v4, v2
 ; CHECK-NEXT: vmv2r.v v6, v2
 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v10, v0.t
 ; CHECK-NEXT: vmv2r.v v8, v4
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32( %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv16i16(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv16i16(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv4i32_nxv16i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv16i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
+ %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
+ %1 = extractvalue {,,} %0, 1
 ret %1
 }
 
-define @test_vloxseg4_mask_nxv4i32_nxv16i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv16i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv16i16( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv1i8(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv1i8(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv4i32_nxv1i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv1i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4i32_nxv1i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv1i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv1i8( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv16i8(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv16i8(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv4i32_nxv16i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv16i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4i32_nxv16i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv16i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv16i8( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv2i32(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv2i32(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv4i32_nxv2i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv2i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4i32_nxv2i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv2i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv2i32( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
 declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i16(i32*, , i32)
 declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16(,,,, i32*, , , i32)
 
@@ -48978,242 +7403,23 @@ entry:
 ret %1
 }
 
-define @test_vloxseg4_mask_nxv4i32_nxv4i16(i32* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg4_mask_nxv4i32_nxv4i16( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv32i16(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv32i16(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv4i32_nxv32i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv32i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4i32_nxv32i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv32i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv32i16( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv1i32(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv1i32(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv4i32_nxv1i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv1i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4i32_nxv1i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv1i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv1i32( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv8i16(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv8i16(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv4i32_nxv8i16(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv8i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4i32_nxv8i16(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv8i16(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv8i16( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv8i8(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv8i8(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv4i32_nxv8i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv8i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4i32_nxv8i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv8i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv8i8( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv8i32(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv8i32(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv4i32_nxv8i32(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv8i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4i32_nxv8i32(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8
+; CHECK-NEXT: vmv2r.v v12, v8
 ; CHECK-NEXT: vmv2r.v v14, v12
 ; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv8i32(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv8i32( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv64i8(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv64i8(,,,, i32*, , , i32)
-
-define @test_vloxseg4_nxv4i32_nxv64i8(i32* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv64i8(i32* %base, %index, i32 %vl)
+ %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,} %0, 1
 ret %1
 }
 
-define @test_vloxseg4_mask_nxv4i32_nxv64i8(i32* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v16, (a0), v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv64i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv64i8( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
 declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i8(i32*, , i32)
 declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8(,,,, i32*, , , i32)
 
@@ -49230,206 +7436,23 @@ entry:
 ret %1
 }
 
-define @test_vloxseg4_mask_nxv4i32_nxv4i8(i32* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg4_mask_nxv4i32_nxv4i8( %val, i32* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i8(i32* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv1i16(i32*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv1i16(,,,, i32*, , , i32)
-
%index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv1i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i32_nxv1i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv1i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv1i16( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv32i8(i32*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv32i8(,,,, i32*, , , i32) - -define @test_vloxseg4_nxv4i32_nxv32i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv32i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i32_nxv32i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv32i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv32i8( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv2i8(i32*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv2i8(,,,, i32*, , , i32) - -define @test_vloxseg4_nxv4i32_nxv2i8(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv2i8(i32* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv4i32_nxv2i8(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: 
vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv2i8(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv2i8( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv16i32(i32*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv16i32(,,,, i32*, , , i32) - -define @test_vloxseg4_nxv4i32_nxv16i32(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv16i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i32_nxv16i32(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv16i32(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv16i32( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv2i16(i32*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv2i16(,,,, i32*, , , i32) - -define @test_vloxseg4_nxv4i32_nxv2i16(i32* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv2i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4i32_nxv2i16(i32* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv2i16(i32* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv2i16( %1, %1, %1, %1, i32* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i32(i32*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32(,,,, i32*, , , i32) @@ -49446,24 +7469,21 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv4i32_nxv4i32(i32* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv4i32_nxv4i32( %val, i32* %base, %index, i32 %vl, %mask) { ; 
CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg4.nxv4i32.nxv4i32(i32* %base, <vscale x 4 x i32> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
-  ret <vscale x 4 x i32> %3
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
+  ret <vscale x 4 x i32> %1
 }
 
 declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.nxv16f16.nxv16i16(half*, <vscale x 16 x i16>, i32)
 declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
@@ -49482,58 +7502,20 @@ entry:
   ret %1
 }
 
-define <vscale x 16 x half> @test_vloxseg2_mask_nxv16f16_nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
+define <vscale x 16 x half> @test_vloxseg2_mask_nxv16f16_nxv16i16(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv16i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv4r.v v4, v8
 ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT: vmv4r.v v8, v16
+; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t
+; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.nxv16f16.nxv16i16(half* %base, <vscale x 16 x i16> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 0
-  %2 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16(<vscale x 16 x half> %1,<vscale x 16 x half> %1, half* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %2, 1
-  ret <vscale x 16 x half> %3
-}
-
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.nxv16f16.nxv1i8(half*, <vscale x 1 x i8>, i32)
-declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv1i8(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 1 x i8>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x half> @test_vloxseg2_nxv16f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.nxv16f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
+  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
   %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
   ret <vscale x 16 x half> %1
 }
 
-define <vscale x 16 x half> @test_vloxseg2_mask_nxv16f16_nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT: vmv4r.v v16, v12
-; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT: vmv4r.v v8, v16
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.nxv16f16.nxv1i8(half* %base, <vscale x 1 x i8> %index, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 0
-  %2 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv1i8(<vscale x 16 x half> %1,<vscale x 16 x half> %1, half* %base, <vscale x 1 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
-  %3 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %2, 1
-  ret <vscale x 16 x half> %3
-}
-
 declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.nxv16f16.nxv16i8(half*, <vscale x 16 x i8>, i32)
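; The remaining masked tests all follow the shape checked above: the tied
; destination %val arrives in v8, a vmv copy places it in the register group
; the segment-load pseudo defines, and a single tail-undisturbed (tu,mu)
; vsetvli guards the masked vloxseg, so the old unmasked-load-then-masked-load
; sequence and its extra ta,mu vsetvli disappear. Where the result already
; sits in v8's register group, only a "# kill" subregister annotation remains
; instead of a copy back.
;
; A minimal fully-typed sketch of one such test (hypothetical name, not part
; of this suite; the scalable vector types follow from the intrinsic's mangled
; suffixes: nxv16f16 is <vscale x 16 x half>, nxv16i8 is <vscale x 16 x i8>,
; and the mask is <vscale x 16 x i1>):
;
;   define <vscale x 16 x half> @sketch_vloxseg2_mask(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
;   entry:
;     ; Pass %val once per segment as the tied pass-through operands.
;     %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8(<vscale x 16 x half> %val, <vscale x 16 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl)
;     %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
;     ret <vscale x 16 x half> %1
;   }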
declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8(,, half*, , , i32) @@ -49550,432 +7532,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv16f16_nxv16i8(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv16f16_nxv16i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv2i32(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv2i32(,, half*, , , i32) - -define @test_vloxseg2_nxv16f16_nxv2i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv2i32(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8( %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv16f16_nxv2i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv2i32( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv4i16(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv4i16(,, half*, , , i32) - -define @test_vloxseg2_nxv16f16_nxv4i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv4i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16f16_nxv4i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv4i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} 
%0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv4i16( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv32i16(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv32i16(,, half*, , , i32) - -define @test_vloxseg2_nxv16f16_nxv32i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16f16_nxv32i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv32i16( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv1i32(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv1i32(,, half*, , , i32) - -define @test_vloxseg2_nxv16f16_nxv1i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16f16_nxv1i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv1i32( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv8i16(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv8i16(,, half*, , , i32) - -define @test_vloxseg2_nxv16f16_nxv8i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16f16_nxv8i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; 
CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv8i16( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv8i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv8i8(,, half*, , , i32) - -define @test_vloxseg2_nxv16f16_nxv8i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16f16_nxv8i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv8i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv8i32(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv8i32(,, half*, , , i32) - -define @test_vloxseg2_nxv16f16_nxv8i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16f16_nxv8i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv8i32( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv64i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv64i8(,, half*, , , i32) - -define @test_vloxseg2_nxv16f16_nxv64i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret 
%1 -} - -define @test_vloxseg2_mask_nxv16f16_nxv64i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv64i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv4i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv4i8(,, half*, , , i32) - -define @test_vloxseg2_nxv16f16_nxv4i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16f16_nxv4i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv4i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv1i16(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv1i16(,, half*, , , i32) - -define @test_vloxseg2_nxv16f16_nxv1i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv1i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16f16_nxv1i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv1i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv1i16( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv32i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv32i8(,, half*, , , i32) - -define @test_vloxseg2_nxv16f16_nxv32i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16f16_nxv32i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv32i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv2i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv2i8(,, half*, , , i32) - -define @test_vloxseg2_nxv16f16_nxv2i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16f16_nxv2i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv2i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i32(half*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32(,, half*, , , i32) @@ -49992,228 +7562,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv16f16_nxv16i32(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv16f16_nxv16i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v16, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv2i16(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv2i16(,, half*, , , i32) - -define 
@test_vloxseg2_nxv16f16_nxv2i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv2i16(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32( %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv16f16_nxv2i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv2i16( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv16f16.nxv4i32(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv4i32(,, half*, , , i32) - -define @test_vloxseg2_nxv16f16_nxv4i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv4i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv16f16_nxv4i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv16f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv4i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv4i32( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv16i16(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv16i16(,, double*, , , i32) - -define @test_vloxseg2_nxv4f64_nxv16i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv16i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f64_nxv16i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv4f64.nxv16i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv16i16( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv1i8(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv1i8(,, double*, , , i32) - -define @test_vloxseg2_nxv4f64_nxv1i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv1i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f64_nxv1i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv1i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv1i8( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv16i8(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv16i8(,, double*, , , i32) - -define @test_vloxseg2_nxv4f64_nxv16i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv16i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f64_nxv16i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv16i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv16i8( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv2i32(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv2i32(,, double*, , , i32) - -define @test_vloxseg2_nxv4f64_nxv2i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv2i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f64_nxv2i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv2i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv2i32( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i16(double*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16(,, double*, , , i32) @@ -50230,228 +7592,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4f64_nxv4i16(double* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv4f64_nxv4i16( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv32i16(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv32i16(,, double*, , , i32) - -define @test_vloxseg2_nxv4f64_nxv32i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv32i16(double* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16( %val, %val, double* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4f64_nxv32i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv32i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv32i16( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv1i32(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv1i32(,, double*, , , i32) - -define @test_vloxseg2_nxv4f64_nxv1i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv4f64.nxv1i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f64_nxv1i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv1i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv1i32( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv8i16(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv8i16(,, double*, , , i32) - -define @test_vloxseg2_nxv4f64_nxv8i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv8i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f64_nxv8i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv8i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv8i16( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv8i8(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv8i8(,, double*, , , i32) - -define @test_vloxseg2_nxv4f64_nxv8i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv8i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f64_nxv8i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv8i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv8i8( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv8i32(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv8i32(,, double*, , , i32) - -define 
@test_vloxseg2_nxv4f64_nxv8i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv8i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f64_nxv8i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv8i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv8i32( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv64i8(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv64i8(,, double*, , , i32) - -define @test_vloxseg2_nxv4f64_nxv64i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv64i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f64_nxv64i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv64i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv64i8( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i8(double*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8(,, double*, , , i32) @@ -50468,194 +7622,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4f64_nxv4i8(double* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv4f64_nxv4i8( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} 
@llvm.riscv.vloxseg2.nxv4f64.nxv1i16(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv1i16(,, double*, , , i32) - -define @test_vloxseg2_nxv4f64_nxv1i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv1i16(double* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8( %val, %val, double* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4f64_nxv1i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv1i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv1i16( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv32i8(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv32i8(,, double*, , , i32) - -define @test_vloxseg2_nxv4f64_nxv32i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv32i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f64_nxv32i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv32i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv32i8( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv2i8(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv2i8(,, double*, , , i32) - -define @test_vloxseg2_nxv4f64_nxv2i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv2i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f64_nxv2i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; 
CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv2i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv2i8( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv16i32(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv16i32(,, double*, , , i32) - -define @test_vloxseg2_nxv4f64_nxv16i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv16i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f64_nxv16i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv16i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv16i32( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv2i16(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv2i16(,, double*, , , i32) - -define @test_vloxseg2_nxv4f64_nxv2i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv2i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f64_nxv2i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv2i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv2i16( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i32(double*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32(,, double*, , , i32) @@ -50672,56 +7652,18 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4f64_nxv4i32(double* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv4f64_nxv4i32( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: 
vsetvli a1, a1, e64,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv16i16(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv16i16(,, double*, , , i32) - -define @test_vloxseg2_nxv1f64_nxv16i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv16i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f64_nxv16i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv16i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv16i16( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32( %val, %val, double* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i8(double*, , i32) @@ -50740,160 +7682,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1f64_nxv1i8(double* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv16i8(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv16i8(,, double*, , , i32) - -define @test_vloxseg2_nxv1f64_nxv16i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv16i8(double* %base, %index, i32 %vl) + %0 = tail call {,} 
@llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8( %val, %val, double* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv1f64_nxv16i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv16i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv16i8( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv2i32(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv2i32(,, double*, , , i32) - -define @test_vloxseg2_nxv1f64_nxv2i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv2i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f64_nxv2i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv2i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv2i32( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv4i16(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv4i16(,, double*, , , i32) - -define @test_vloxseg2_nxv1f64_nxv4i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv4i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f64_nxv4i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv4i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv4i16( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv32i16(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv32i16(,, double*, , , i32) - -define 
@test_vloxseg2_nxv1f64_nxv32i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv32i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f64_nxv32i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv32i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv32i16( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i32(double*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32(,, double*, , , i32) @@ -50910,194 +7712,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1f64_nxv1i32(double* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv8i16(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv8i16(,, double*, , , i32) - -define @test_vloxseg2_nxv1f64_nxv8i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv8i16(double* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32( %val, %val, double* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv1f64_nxv8i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv8i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv8i16( %1, %1, double* 
%base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv8i8(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv8i8(,, double*, , , i32) - -define @test_vloxseg2_nxv1f64_nxv8i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv8i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f64_nxv8i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv8i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv8i8( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv8i32(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv8i32(,, double*, , , i32) - -define @test_vloxseg2_nxv1f64_nxv8i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv8i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f64_nxv8i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv8i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv8i32( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv64i8(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv64i8(,, double*, , , i32) - -define @test_vloxseg2_nxv1f64_nxv64i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv64i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f64_nxv64i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, 
v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv64i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv64i8( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv4i8(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv4i8(,, double*, , , i32) - -define @test_vloxseg2_nxv1f64_nxv4i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv4i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f64_nxv4i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv4i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv4i8( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i16(double*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16(,, double*, , , i32) @@ -51114,229 +7742,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1f64_nxv1i16(double* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv32i8(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv32i8(,, double*, , , i32) - -define @test_vloxseg2_nxv1f64_nxv32i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv32i8(double* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16( %val, %val, double* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv1f64_nxv32i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv32i8: -; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv32i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv32i8( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv2i8(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv2i8(,, double*, , , i32) - -define @test_vloxseg2_nxv1f64_nxv2i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv2i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f64_nxv2i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv2i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv2i8( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv16i32(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv16i32(,, double*, , , i32) - -define @test_vloxseg2_nxv1f64_nxv16i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv16i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f64_nxv16i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv16i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv16i32( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv2i16(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv2i16(,, double*, , , i32) - -define @test_vloxseg2_nxv1f64_nxv2i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv1f64.nxv2i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f64_nxv2i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv2i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv2i16( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f64.nxv4i32(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv4i32(,, double*, , , i32) - -define @test_vloxseg2_nxv1f64_nxv4i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv4i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f64_nxv4i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv4i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv4i32( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv16i16(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv16i16(,,, double*, , , i32) - -define @test_vloxseg3_nxv1f64_nxv16i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv16i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f64_nxv16i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv16i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv16i16( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i8(double*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8(,,, 
double*, , , i32) @@ -51353,165 +7772,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv1f64_nxv1i8(double* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv16i8(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv16i8(,,, double*, , , i32) - -define @test_vloxseg3_nxv1f64_nxv16i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv16i8(double* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv1f64_nxv16i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv16i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv16i8( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv2i32(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv2i32(,,, double*, , , i32) - -define @test_vloxseg3_nxv1f64_nxv2i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv2i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f64_nxv2i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv2i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 
= tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv2i32( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv4i16(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv4i16(,,, double*, , , i32) - -define @test_vloxseg3_nxv1f64_nxv4i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv4i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f64_nxv4i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv4i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv4i16( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv32i16(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv32i16(,,, double*, , , i32) - -define @test_vloxseg3_nxv1f64_nxv32i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv32i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f64_nxv32i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv32i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv32i16( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i32(double*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32(,,, double*, , , i32) @@ -51528,200 +7804,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv1f64_nxv1i32(double* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei32.v v1, (a0), 
v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv8i16(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv8i16(,,, double*, , , i32) - -define @test_vloxseg3_nxv1f64_nxv8i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv8i16(double* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv1f64_nxv8i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv8i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv8i16( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv8i8(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv8i8(,,, double*, , , i32) - -define @test_vloxseg3_nxv1f64_nxv8i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv8i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f64_nxv8i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv8i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv8i8( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv8i32(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv8i32(,,, double*, , , i32) - -define @test_vloxseg3_nxv1f64_nxv8i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} 
@llvm.riscv.vloxseg3.nxv1f64.nxv8i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f64_nxv8i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv8i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv8i32( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv64i8(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv64i8(,,, double*, , , i32) - -define @test_vloxseg3_nxv1f64_nxv64i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv64i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f64_nxv64i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv64i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv64i8( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv4i8(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv4i8(,,, double*, , , i32) - -define @test_vloxseg3_nxv1f64_nxv4i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv4i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f64_nxv4i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv4i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv4i8( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i16(double*, , i32) 
declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16(,,, double*, , , i32) @@ -51738,236 +7836,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv1f64_nxv1i16(double* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv32i8(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv32i8(,,, double*, , , i32) - -define @test_vloxseg3_nxv1f64_nxv32i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv32i8(double* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv1f64_nxv32i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv32i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv32i8( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv2i8(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv2i8(,,, double*, , , i32) - -define @test_vloxseg3_nxv1f64_nxv2i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv2i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f64_nxv2i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv2i8(double* 
%base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv2i8( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv16i32(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv16i32(,,, double*, , , i32) - -define @test_vloxseg3_nxv1f64_nxv16i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv16i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f64_nxv16i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv16i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv16i32( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv2i16(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv2i16(,,, double*, , , i32) - -define @test_vloxseg3_nxv1f64_nxv2i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv2i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f64_nxv2i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv2i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv2i16( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv4i32(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv4i32(,,, double*, , , i32) - -define @test_vloxseg3_nxv1f64_nxv4i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv4i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f64_nxv4i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg3_mask_nxv1f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv4i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv4i32( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv16i16(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv16i16(,,,, double*, , , i32) - -define @test_vloxseg4_nxv1f64_nxv16i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv16i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f64_nxv16i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv16i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv16i16( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i8(double*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8(,,,, double*, , , i32) @@ -51984,170 +7868,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1f64_nxv1i8(double* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv16i8(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv16i8(,,,, double*, , , i32) - -define @test_vloxseg4_nxv1f64_nxv16i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 
-; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv16i8(double* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1f64_nxv16i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv16i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv16i8( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv2i32(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv2i32(,,,, double*, , , i32) - -define @test_vloxseg4_nxv1f64_nxv2i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv2i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f64_nxv2i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv2i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv2i32( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv4i16(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv4i16(,,,, double*, , , i32) - -define @test_vloxseg4_nxv1f64_nxv4i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv4i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f64_nxv4i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.nxv1f64.nxv4i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv4i16( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv32i16(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv32i16(,,,, double*, , , i32) - -define @test_vloxseg4_nxv1f64_nxv32i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv32i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f64_nxv32i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv32i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv32i16( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i32(double*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32(,,,, double*, , , i32) @@ -52164,206 +7901,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1f64_nxv1i32(double* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv8i16(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv8i16(,,,, double*, , , i32) - -define @test_vloxseg4_nxv1f64_nxv8i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv8i16(double* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1f64_nxv8i16(double* %base, %index, i32 %vl, 
%mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv8i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv8i16( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv8i8(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv8i8(,,,, double*, , , i32) - -define @test_vloxseg4_nxv1f64_nxv8i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv8i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f64_nxv8i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv8i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv8i8( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv8i32(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv8i32(,,,, double*, , , i32) - -define @test_vloxseg4_nxv1f64_nxv8i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv8i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f64_nxv8i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv8i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv8i32( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv64i8(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv64i8(,,,, 
double*, , , i32) - -define @test_vloxseg4_nxv1f64_nxv64i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv64i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f64_nxv64i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv64i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv64i8( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv4i8(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv4i8(,,,, double*, , , i32) - -define @test_vloxseg4_nxv1f64_nxv4i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv4i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f64_nxv4i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv4i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv4i8( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i16(double*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16(,,,, double*, , , i32) @@ -52380,243 +7934,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1f64_nxv1i16(double* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = 
tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv32i8(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv32i8(,,,, double*, , , i32) - -define @test_vloxseg4_nxv1f64_nxv32i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv32i8(double* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1f64_nxv32i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv32i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv32i8( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv2i8(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv2i8(,,,, double*, , , i32) - -define @test_vloxseg4_nxv1f64_nxv2i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv2i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f64_nxv2i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv2i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv2i8( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv16i32(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv16i32(,,,, double*, , , i32) - -define @test_vloxseg4_nxv1f64_nxv16i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv16i32(double* %base, %index, i32 %vl) - %1 = extractvalue 
{,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg4_mask_nxv1f64_nxv16i32(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv16i32(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,} %0, 0
-  %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv16i32( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,} %2, 1
-  ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv2i16(double*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv2i16(,,,, double*, , , i32)
-
-define @test_vloxseg4_nxv1f64_nxv2i16(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv2i16(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg4_mask_nxv1f64_nxv2i16(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv2i16(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,} %0, 0
-  %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv2i16( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,} %2, 1
-  ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv4i32(double*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv4i32(,,,, double*, , , i32)
-
-define @test_vloxseg4_nxv1f64_nxv4i32(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv4i32(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg4_mask_nxv1f64_nxv4i32(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv4i32(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,} %0, 0
-  %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f64.nxv4i32( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv16i16(double*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv16i16(,,,,, double*, , , i32)
-
-define @test_vloxseg5_nxv1f64_nxv16i16(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv16i16(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg5_mask_nxv1f64_nxv16i16(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv16i16(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv16i16( %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
 declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i8(double*, , i32)
 declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8(,,,,, double*, , , i32)
@@ -52633,175 +7967,24 @@ entry:
   ret %1
 }
 
-define @test_vloxseg5_mask_nxv1f64_nxv1i8(double* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg5_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg5ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vloxseg5ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i8(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8( %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv16i8(double*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv16i8(,,,,, double*, , , i32)
-
-define @test_vloxseg5_nxv1f64_nxv16i8(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv16i8(double* %base, %index, i32 %vl)
+  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
   %1 = extractvalue {,,,,} %0, 1
   ret %1
 }
 
-define @test_vloxseg5_mask_nxv1f64_nxv16i8(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv16i8(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv16i8( %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv2i32(double*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv2i32(,,,,, double*, , , i32)
-
-define @test_vloxseg5_nxv1f64_nxv2i32(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv2i32(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg5_mask_nxv1f64_nxv2i32(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv2i32(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv2i32( %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv4i16(double*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv4i16(,,,,, double*, , , i32)
-
-define @test_vloxseg5_nxv1f64_nxv4i16(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv4i16(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg5_mask_nxv1f64_nxv4i16(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv4i16(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv4i16( %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv32i16(double*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv32i16(,,,,, double*, , , i32)
-
-define @test_vloxseg5_nxv1f64_nxv32i16(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv32i16(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg5_mask_nxv1f64_nxv32i16(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv32i16(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv32i16( %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
 declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i32(double*, , i32)
 declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32(,,,,, double*, , , i32)
@@ -52818,212 +8001,24 @@ entry:
   ret %1
 }
 
-define @test_vloxseg5_mask_nxv1f64_nxv1i32(double* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg5_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg5ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vloxseg5ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i32(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32( %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv8i16(double*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv8i16(,,,,, double*, , , i32)
-
-define @test_vloxseg5_nxv1f64_nxv8i16(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv8i16(double* %base, %index, i32 %vl)
+  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
   %1 = extractvalue {,,,,} %0, 1
   ret %1
 }
 
-define @test_vloxseg5_mask_nxv1f64_nxv8i16(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv8i16(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv8i16( %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv8i8(double*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv8i8(,,,,, double*, , , i32)
-
-define @test_vloxseg5_nxv1f64_nxv8i8(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv8i8(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg5_mask_nxv1f64_nxv8i8(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv8i8(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv8i8( %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv8i32(double*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv8i32(,,,,, double*, , , i32)
-
-define @test_vloxseg5_nxv1f64_nxv8i32(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv8i32(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg5_mask_nxv1f64_nxv8i32(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv8i32(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv8i32( %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv64i8(double*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv64i8(,,,,, double*, , , i32)
-
-define @test_vloxseg5_nxv1f64_nxv64i8(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv64i8(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg5_mask_nxv1f64_nxv64i8(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv64i8(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv64i8( %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv4i8(double*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv4i8(,,,,, double*, , , i32)
-
-define @test_vloxseg5_nxv1f64_nxv4i8(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv4i8(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg5_mask_nxv1f64_nxv4i8(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv4i8(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv4i8( %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
 declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i16(double*, , i32)
 declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16(,,,,, double*, , , i32)
@@ -53040,250 +8035,24 @@ entry:
   ret %1
 }
 
-define @test_vloxseg5_mask_nxv1f64_nxv1i16(double* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg5_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg5ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vloxseg5ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i16(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16( %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv32i8(double*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv32i8(,,,,, double*, , , i32)
-
-define @test_vloxseg5_nxv1f64_nxv32i8(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv32i8(double* %base, %index, i32 %vl)
+  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
  %1 = extractvalue {,,,,} %0, 1
   ret %1
 }
 
-define @test_vloxseg5_mask_nxv1f64_nxv32i8(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv32i8(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv32i8( %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv2i8(double*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv2i8(,,,,, double*, , , i32)
-
-define @test_vloxseg5_nxv1f64_nxv2i8(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv2i8(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg5_mask_nxv1f64_nxv2i8(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv2i8(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv2i8( %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv16i32(double*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv16i32(,,,,, double*, , , i32)
-
-define @test_vloxseg5_nxv1f64_nxv16i32(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv16i32(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg5_mask_nxv1f64_nxv16i32(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv16i32(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv16i32( %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv2i16(double*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv2i16(,,,,, double*, , , i32)
-
-define @test_vloxseg5_nxv1f64_nxv2i16(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv2i16(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg5_mask_nxv1f64_nxv2i16(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv2i16(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv2i16( %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv4i32(double*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv4i32(,,,,, double*, , , i32)
-
-define @test_vloxseg5_nxv1f64_nxv4i32(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv4i32(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg5_mask_nxv1f64_nxv4i32(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv4i32(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f64.nxv4i32( %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv16i16(double*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv16i16(,,,,,, double*, , , i32)
-
-define @test_vloxseg6_nxv1f64_nxv16i16(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv16i16(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg6_mask_nxv1f64_nxv16i16(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv16i16(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv16i16( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
 declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i8(double*, , i32)
 declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8(,,,,,, double*, , , i32)
@@ -53300,180 +8069,25 @@ entry:
   ret %1
 }
 
-define @test_vloxseg6_mask_nxv1f64_nxv1i8(double* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg6_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg6ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vloxseg6ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i8(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv16i8(double*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv16i8(,,,,,, double*, , , i32)
-
-define @test_vloxseg6_nxv1f64_nxv16i8(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv16i8(double* %base, %index, i32 %vl)
+  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
   %1 = extractvalue {,,,,,} %0, 1
   ret %1
 }
 
-define @test_vloxseg6_mask_nxv1f64_nxv16i8(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv16i8(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv16i8( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv2i32(double*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv2i32(,,,,,, double*, , , i32)
-
-define @test_vloxseg6_nxv1f64_nxv2i32(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv2i32(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg6_mask_nxv1f64_nxv2i32(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv2i32(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv2i32( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv4i16(double*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv4i16(,,,,,, double*, , , i32)
-
-define @test_vloxseg6_nxv1f64_nxv4i16(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv4i16(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg6_mask_nxv1f64_nxv4i16(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv4i16(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv4i16( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv32i16(double*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv32i16(,,,,,, double*, , , i32)
-
-define @test_vloxseg6_nxv1f64_nxv32i16(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv32i16(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg6_mask_nxv1f64_nxv32i16(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv32i16(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv32i16( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
 declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i32(double*, , i32)
 declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32(,,,,,, double*, , , i32)
@@ -53490,218 +8104,25 @@ entry:
   ret %1
 }
 
-define @test_vloxseg6_mask_nxv1f64_nxv1i32(double* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg6_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg6ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vloxseg6ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i32(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv8i16(double*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv8i16(,,,,,, double*, , , i32)
-
-define @test_vloxseg6_nxv1f64_nxv8i16(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv8i16(double* %base, %index, i32 %vl)
+  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
   %1 = extractvalue {,,,,,} %0, 1
   ret %1
 }
-define @test_vloxseg6_mask_nxv1f64_nxv8i16(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv8i16(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv8i16( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv8i8(double*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv8i8(,,,,,, double*, , , i32)
-
-define @test_vloxseg6_nxv1f64_nxv8i8(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv8i8(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg6_mask_nxv1f64_nxv8i8(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv8i8(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv8i8( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv8i32(double*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv8i32(,,,,,, double*, , , i32)
-
-define @test_vloxseg6_nxv1f64_nxv8i32(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv8i32(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg6_mask_nxv1f64_nxv8i32(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv8i32(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv8i32( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv64i8(double*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv64i8(,,,,,, double*, , , i32)
-
-define @test_vloxseg6_nxv1f64_nxv64i8(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv64i8(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg6_mask_nxv1f64_nxv64i8(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv64i8(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv64i8( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv4i8(double*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv4i8(,,,,,, double*, , , i32)
-
-define @test_vloxseg6_nxv1f64_nxv4i8(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv4i8(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg6_mask_nxv1f64_nxv4i8(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv4i8(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv4i8( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
 declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i16(double*, , i32)
 declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16(,,,,,, double*, , , i32)
@@ -53718,257 +8139,25 @@ entry:
   ret %1
 }
 
-define @test_vloxseg6_mask_nxv1f64_nxv1i16(double* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg6_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei16.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
 ; CHECK-NEXT:    vmv1r.v v5, v1
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg6ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vloxseg6ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i16(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv32i8(double*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv32i8(,,,,,, double*, , , i32)
-
-define @test_vloxseg6_nxv1f64_nxv32i8(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv32i8(double* %base, %index, i32 %vl)
+  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
   %1 = extractvalue {,,,,,} %0, 1
   ret %1
 }
 
-define @test_vloxseg6_mask_nxv1f64_nxv32i8(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv32i8(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv32i8( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv2i8(double*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv2i8(,,,,,, double*, , , i32)
-
-define @test_vloxseg6_nxv1f64_nxv2i8(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv2i8(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg6_mask_nxv1f64_nxv2i8(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv2i8(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv2i8( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv16i32(double*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv16i32(,,,,,, double*, , , i32)
-
-define @test_vloxseg6_nxv1f64_nxv16i32(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv16i32(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg6_mask_nxv1f64_nxv16i32(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv16i32(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv16i32( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv2i16(double*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv2i16(,,,,,, double*, , , i32)
-
-define @test_vloxseg6_nxv1f64_nxv2i16(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv2i16(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg6_mask_nxv1f64_nxv2i16(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv2i16(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv2i16( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv4i32(double*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv4i32(,,,,,, double*, , , i32)
-
-define @test_vloxseg6_nxv1f64_nxv4i32(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv4i32(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg6_mask_nxv1f64_nxv4i32(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv4i32(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f64.nxv4i32( %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv16i16(double*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv16i16(,,,,,,, double*, , , i32)
-
-define @test_vloxseg7_nxv1f64_nxv16i16(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv16i16(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg7_mask_nxv1f64_nxv16i16(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv16i16(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 0
-  %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv16i16( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,,} %2, 1
-  ret %3
-}
-
 declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i8(double*, , i32)
 declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8(,,,,,,, double*, , , i32)
@@ -53985,11 +8174,10 @@ entry:
   ret %1
 }
 
-define @test_vloxseg7_mask_nxv1f64_nxv1i8(double* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg7_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg7ei8.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -53997,173 +8185,15 @@ define @test_vloxseg7_mask_nxv1f64_nxv1i8(double* %base, <
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg7ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vloxseg7ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i8(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 0
-  %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv16i8(double*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv16i8(,,,,,,, double*, , , i32)
-
-define @test_vloxseg7_nxv1f64_nxv16i8(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv16i8(double* %base, %index, i32 %vl)
+  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
   %1 = extractvalue {,,,,,,} %0, 1
   ret %1
 }
 
-define @test_vloxseg7_mask_nxv1f64_nxv16i8(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv16i8(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 0
-  %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv16i8( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv2i32(double*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv2i32(,,,,,,, double*, , , i32)
-
-define @test_vloxseg7_nxv1f64_nxv2i32(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv2i32(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg7_mask_nxv1f64_nxv2i32(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv2i32(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 0
-  %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv2i32( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv4i16(double*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv4i16(,,,,,,, double*, , , i32)
-
-define @test_vloxseg7_nxv1f64_nxv4i16(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv4i16(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg7_mask_nxv1f64_nxv4i16(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv4i16(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 0
-  %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv4i16( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv32i16(double*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv32i16(,,,,,,, double*, , , i32)
-
-define @test_vloxseg7_nxv1f64_nxv32i16(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv32i16(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg7_mask_nxv1f64_nxv32i16(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv32i16(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 0
-  %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv32i16( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,,} %2, 1
-  ret %3
-}
-
 declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i32(double*, , i32)
 declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32(,,,,,,, double*, , , i32)
@@ -54180,11 +8210,10 @@ entry:
   ret %1
 }
 
-define @test_vloxseg7_mask_nxv1f64_nxv1i32(double* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg7_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg7ei32.v v1, (a0), v8
+; CHECK-NEXT:    vmv1r.v v1, v8
 ; CHECK-NEXT:    vmv1r.v v2, v1
 ; CHECK-NEXT:    vmv1r.v v3, v1
 ; CHECK-NEXT:    vmv1r.v v4, v1
@@ -54192,212 +8221,15 @@ define @test_vloxseg7_mask_nxv1f64_nxv1i32(double* %base,
 ; CHECK-NEXT:    vmv1r.v v6, v1
 ; CHECK-NEXT:    vmv1r.v v7, v1
 ; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg7ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT:    vloxseg7ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i32(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 0
-  %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv8i16(double*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv8i16(,,,,,,, double*, , , i32)
-
-define @test_vloxseg7_nxv1f64_nxv8i16(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv8i16(double* %base, %index, i32 %vl)
+  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
   %1 = extractvalue {,,,,,,} %0, 1
   ret %1
 }
 
-define @test_vloxseg7_mask_nxv1f64_nxv8i16(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv8i16(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 0
-  %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv8i16( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv8i8(double*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv8i8(,,,,,,, double*, , , i32)
-
-define @test_vloxseg7_nxv1f64_nxv8i8(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv8i8(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg7_mask_nxv1f64_nxv8i8(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT:    vloxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vmv1r.v v3, v1
-; CHECK-NEXT:    vmv1r.v v4, v1
-; CHECK-NEXT:    vmv1r.v v5, v1
-; CHECK-NEXT:    vmv1r.v v6, v1
-; CHECK-NEXT:    vmv1r.v v7, v1
-; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT:    vloxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv8i8(double* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 0
-  %2
= tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv8i8( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv8i32(double*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv8i32(,,,,,,, double*, , , i32) - -define @test_vloxseg7_nxv1f64_nxv8i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv8i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f64_nxv8i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv8i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv8i32( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv64i8(double*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv64i8(,,,,,,, double*, , , i32) - -define @test_vloxseg7_nxv1f64_nxv64i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv64i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f64_nxv64i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv64i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv64i8( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv4i8(double*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv4i8(,,,,,,, double*, , , i32) - -define @test_vloxseg7_nxv1f64_nxv4i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; 
CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv4i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f64_nxv4i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv4i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv4i8( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i16(double*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16(,,,,,,, double*, , , i32) @@ -54414,11 +8246,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1f64_nxv1i16(double* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -54426,252 +8257,15 @@ define @test_vloxseg7_mask_nxv1f64_nxv1i16(double* %base, ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vmv1r.v v7, v1 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv32i8(double*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv32i8(,,,,,,, double*, , , i32) - -define @test_vloxseg7_nxv1f64_nxv32i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv32i8(double* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1f64_nxv32i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: 
vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv32i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv32i8( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv2i8(double*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv2i8(,,,,,,, double*, , , i32) - -define @test_vloxseg7_nxv1f64_nxv2i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv2i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f64_nxv2i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv2i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv2i8( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv16i32(double*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv16i32(,,,,,,, double*, , , i32) - -define @test_vloxseg7_nxv1f64_nxv16i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv16i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f64_nxv16i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv16i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv16i32( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv2i16(double*, , i32) -declare {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv1f64.nxv2i16(,,,,,,, double*, , , i32) - -define @test_vloxseg7_nxv1f64_nxv2i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv2i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f64_nxv2i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv2i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv2i16( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv4i32(double*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv4i32(,,,,,,, double*, , , i32) - -define @test_vloxseg7_nxv1f64_nxv4i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv4i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f64_nxv4i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv4i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f64.nxv4i32( %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv16i16(double*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv16i16(,,,,,,,, double*, , , i32) - -define @test_vloxseg8_nxv1f64_nxv16i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv16i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f64_nxv16i16(double* 
%base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv16i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i8(double*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8(,,,,,,,, double*, , , i32) @@ -54688,51 +8282,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1f64_nxv1i8(double* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv1f64_nxv1i8( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv16i8(double*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv16i8(,,,,,,,, double*, , , i32) - -define @test_vloxseg8_nxv1f64_nxv16i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv16i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f64_nxv16i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -54741,137 +8294,15 @@ define @test_vloxseg8_mask_nxv1f64_nxv16i8(double* %base, ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8, v0.t +; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv16i8(double* %base, %index, i32 %vl) - %1 = 
extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv2i32(double*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv2i32(,,,,,,,, double*, , , i32) - -define @test_vloxseg8_nxv1f64_nxv2i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv2i32(double* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv1f64_nxv2i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv2i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv4i16(double*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv4i16(,,,,,,,, double*, , , i32) - -define @test_vloxseg8_nxv1f64_nxv4i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv4i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f64_nxv4i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv4i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv32i16(double*, , i32) -declare {,,,,,,,} 
@llvm.riscv.vloxseg8.mask.nxv1f64.nxv32i16(,,,,,,,, double*, , , i32) - -define @test_vloxseg8_nxv1f64_nxv32i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv32i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f64_nxv32i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv32i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i32(double*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32(,,,,,,,, double*, , , i32) @@ -54888,51 +8319,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1f64_nxv1i32(double* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv8i16(double*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv8i16(,,,,,,,, double*, , , i32) - -define @test_vloxseg8_nxv1f64_nxv8i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv8i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f64_nxv8i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 
+; CHECK-NEXT: vmv1r.v v10, v8
 ; CHECK-NEXT: vmv1r.v v11, v10
 ; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vmv1r.v v13, v10
@@ -54941,177 +8331,15 @@ define @test_vloxseg8_mask_nxv1f64_nxv8i16(double* %base,
 ; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv8i16(double* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv8i8(double*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv8i8(,,,,,,,, double*, , , i32)
-
-define @test_vloxseg8_nxv1f64_nxv8i8(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv8i8(double* %base, %index, i32 %vl)
+ %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,,,,,} %0, 1
 ret %1
 }

-define @test_vloxseg8_mask_nxv1f64_nxv8i8(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v11, v9
-; CHECK-NEXT: vmv1r.v v12, v9
-; CHECK-NEXT: vmv1r.v v13, v9
-; CHECK-NEXT: vmv1r.v v14, v9
-; CHECK-NEXT: vmv1r.v v15, v9
-; CHECK-NEXT: vmv1r.v v16, v9
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv8i8(double* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv8i32(double*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv8i32(,,,,,,,, double*, , , i32)
-
-define @test_vloxseg8_nxv1f64_nxv8i32(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv8i32(double* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv1f64_nxv8i32(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv8i32(double* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv64i8(double*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv64i8(,,,,,,,, double*, , , i32)
-
-define @test_vloxseg8_nxv1f64_nxv64i8(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv64i8(double* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv1f64_nxv64i8(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v17
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv64i8(double* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv4i8(double*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv4i8(,,,,,,,, double*, , , i32)
-
-define @test_vloxseg8_nxv1f64_nxv4i8(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv4i8(double* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv1f64_nxv4i8(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v11, v9
-; CHECK-NEXT: vmv1r.v v12, v9
-; CHECK-NEXT: vmv1r.v v13, v9
-; CHECK-NEXT: vmv1r.v v14, v9
-; CHECK-NEXT: vmv1r.v v15, v9
-; CHECK-NEXT: vmv1r.v v16, v9
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv4i8(double* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
 declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i16(double*, , i32)
 declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16(,,,,,,,, double*, , , i32)

@@ -55128,211 +8356,10 @@ entry:
 ret %1
 }

-define @test_vloxseg8_mask_nxv1f64_nxv1i16(double* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg8_mask_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v11, v9
-; CHECK-NEXT: vmv1r.v v12, v9
-; CHECK-NEXT: vmv1r.v v13, v9
-; CHECK-NEXT: vmv1r.v v14, v9
-; CHECK-NEXT: vmv1r.v v15, v9
-; CHECK-NEXT: vmv1r.v v16, v9
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i16(double* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv32i8(double*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv32i8(,,,,,,,, double*, , , i32)
-
-define @test_vloxseg8_nxv1f64_nxv32i8(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv32i8(double* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv1f64_nxv32i8(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv32i8(double* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv2i8(double*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv2i8(,,,,,,,, double*, , , i32)
-
-define @test_vloxseg8_nxv1f64_nxv2i8(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv2i8(double* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv1f64_nxv2i8(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v11, v9
-; CHECK-NEXT: vmv1r.v v12, v9
-; CHECK-NEXT: vmv1r.v v13, v9
-; CHECK-NEXT: vmv1r.v v14, v9
-; CHECK-NEXT: vmv1r.v v15, v9
-; CHECK-NEXT: vmv1r.v v16, v9
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv2i8(double* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv16i32(double*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv16i32(,,,,,,,, double*, , , i32)
-
-define @test_vloxseg8_nxv1f64_nxv16i32(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv16i32(double* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv1f64_nxv16i32(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v17
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv16i32(double* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv2i16(double*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv2i16(,,,,,,,, double*, , , i32)
-
-define @test_vloxseg8_nxv1f64_nxv2i16(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv2i16(double* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv1f64_nxv2i16(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v11, v9
-; CHECK-NEXT: vmv1r.v v12, v9
-; CHECK-NEXT: vmv1r.v v13, v9
-; CHECK-NEXT: vmv1r.v v14, v9
-; CHECK-NEXT: vmv1r.v v15, v9
-; CHECK-NEXT: vmv1r.v v16, v9
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv2i16(double* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv4i32(double*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv4i32(,,,,,,,, double*, , , i32)
-
-define @test_vloxseg8_nxv1f64_nxv4i32(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv4i32(double* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv1f64_nxv4i32(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v10, v8
 ; CHECK-NEXT: vmv1r.v v11, v10
 ; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vmv1r.v v13, v10
@@ -55341,117 +8368,13 @@ define @test_vloxseg8_mask_nxv1f64_nxv4i32(double* %base,
 ; CHECK-NEXT: vmv1r.v v16, v10
 ; CHECK-NEXT: vmv1r.v v17, v10
 ; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
-; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v11
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv4i32(double* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv16i16(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv16i16(,, float*, , , i32)
-
-define @test_vloxseg2_nxv2f32_nxv16i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv16i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv2f32_nxv16i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv16i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv16i16( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv1i8(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv1i8(,, float*, , , i32)
-
-define @test_vloxseg2_nxv2f32_nxv1i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv1i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv2f32_nxv1i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv1i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv1i8( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv16i8(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv16i8(,, float*, , , i32)
-
-define @test_vloxseg2_nxv2f32_nxv16i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv16i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv2f32_nxv16i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv16i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv16i8( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
+ %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 1
+ ret %1
 }

 declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i32(float*, , i32)
 declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32(,, float*, , , i32)

@@ -55470,364 +8393,20 @@ entry:
 ret %1
 }

-define @test_vloxseg2_mask_nxv2f32_nxv2i32(float* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg2_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v7, v8
 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv4i16(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv4i16(,, float*, , , i32)
-
-define @test_vloxseg2_nxv2f32_nxv4i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv4i16(float* %base, %index, i32 %vl)
+ %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32( %val, %val, float* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,} %0, 1
 ret %1
 }

-define @test_vloxseg2_mask_nxv2f32_nxv4i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv4i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv4i16( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv32i16(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv32i16(,, float*, , , i32)
-
-define @test_vloxseg2_nxv2f32_nxv32i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv32i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv2f32_nxv32i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv32i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv32i16( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv1i32(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv1i32(,, float*, , , i32)
-
-define @test_vloxseg2_nxv2f32_nxv1i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv1i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv2f32_nxv1i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv1i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv1i32( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv8i16(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv8i16(,, float*, , , i32)
-
-define @test_vloxseg2_nxv2f32_nxv8i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv8i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv2f32_nxv8i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv8i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv8i16( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv8i8(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv8i8(,, float*, , , i32)
-
-define @test_vloxseg2_nxv2f32_nxv8i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv8i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv2f32_nxv8i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv8i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv8i8( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv8i32(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv8i32(,, float*, , , i32)
-
-define @test_vloxseg2_nxv2f32_nxv8i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv8i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv2f32_nxv8i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv8i32(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 0
-  %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv8i32( %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,} %2, 1
-  ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv64i8(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv64i8(,, float*, , , i32)
-
-define @test_vloxseg2_nxv2f32_nxv64i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv64i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg2_mask_nxv2f32_nxv64i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv64i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 0
-  %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv64i8( %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,} %2, 1
-  ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv4i8(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv4i8(,, float*, , , i32)
-
-define @test_vloxseg2_nxv2f32_nxv4i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv4i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg2_mask_nxv2f32_nxv4i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv4i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 0
-  %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv4i8( %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,} %2, 1
-  ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv1i16(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv1i16(,, float*, , , i32)
-
-define @test_vloxseg2_nxv2f32_nxv1i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv1i16(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg2_mask_nxv2f32_nxv1i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv1i16(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 0
-  %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv1i16( %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,} %2, 1
-  ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv32i8(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv32i8(,, float*, , , i32)
-
-define @test_vloxseg2_nxv2f32_nxv32i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v1
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv32i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg2_mask_nxv2f32_nxv32i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
-; CHECK-NEXT:    ret
-entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv32i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 0
-  %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv32i8( %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,} %2, 1
-  ret %3
-}
-
 declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i8(float*, , i32)
 declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8(,, float*, , , i32)
@@ -55844,58 +8423,20 @@ entry:
   ret %1
 }

-define @test_vloxseg2_mask_nxv2f32_nxv2i8(float* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg2_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT:    vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v2
+; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
+; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,} %0, 0
-  %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8( %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,} %2, 1
-  ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv16i32(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv16i32(,, float*, , , i32)
-
-define @test_vloxseg2_nxv2f32_nxv16i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT:    vloxseg2ei32.v v0, (a0),
v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv16i32(float* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8( %val, %val, float* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv2f32_nxv16i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv16i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv16i32( %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i16(float*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16(,, float*, , , i32) @@ -55912,163 +8453,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2f32_nxv2i16(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16( %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f32.nxv4i32(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv4i32(,, float*, , , i32) - -define @test_vloxseg2_nxv2f32_nxv4i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv4i32(float* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16( %val, %val, float* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv2f32_nxv4i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv4i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv4i32( %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv16i16(float*, , 
i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv16i16(,,, float*, , , i32) - -define @test_vloxseg3_nxv2f32_nxv16i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv16i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f32_nxv16i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv16i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv16i16( %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv1i8(float*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv1i8(,,, float*, , , i32) - -define @test_vloxseg3_nxv2f32_nxv1i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv1i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f32_nxv1i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv1i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv1i8( %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv16i8(float*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv16i8(,,, float*, , , i32) - -define @test_vloxseg3_nxv2f32_nxv16i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv16i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f32_nxv16i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: 
vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv16i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv16i8( %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i32(float*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32(,,, float*, , , i32) @@ -56085,375 +8483,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2f32_nxv2i32(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32( %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv4i16(float*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv4i16(,,, float*, , , i32) - -define @test_vloxseg3_nxv2f32_nxv4i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv4i16(float* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2f32_nxv4i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv4i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv4i16( %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv32i16(float*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv32i16(,,, float*, , , i32) - -define @test_vloxseg3_nxv2f32_nxv32i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv32i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f32_nxv32i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv32i16: -; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv32i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv32i16( %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv1i32(float*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv1i32(,,, float*, , , i32) - -define @test_vloxseg3_nxv2f32_nxv1i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv1i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f32_nxv1i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv1i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv1i32( %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv8i16(float*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv8i16(,,, float*, , , i32) - -define @test_vloxseg3_nxv2f32_nxv8i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv8i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f32_nxv8i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv8i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv8i16( %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv8i8(float*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv8i8(,,, float*, , , i32) - -define @test_vloxseg3_nxv2f32_nxv8i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, 
e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv8i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f32_nxv8i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv8i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv8i8( %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv8i32(float*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv8i32(,,, float*, , , i32) - -define @test_vloxseg3_nxv2f32_nxv8i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv8i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f32_nxv8i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv8i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv8i32( %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv64i8(float*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv64i8(,,, float*, , , i32) - -define @test_vloxseg3_nxv2f32_nxv64i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv64i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f32_nxv64i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv64i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv64i8( %1, %1, %1, float* %base, %index, %mask, i32 
%vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv4i8(float*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv4i8(,,, float*, , , i32) - -define @test_vloxseg3_nxv2f32_nxv4i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv4i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f32_nxv4i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv4i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv4i8( %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv1i16(float*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv1i16(,,, float*, , , i32) - -define @test_vloxseg3_nxv2f32_nxv1i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv1i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f32_nxv1i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv1i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv1i16( %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv32i8(float*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv32i8(,,, float*, , , i32) - -define @test_vloxseg3_nxv2f32_nxv32i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv32i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f32_nxv32i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; 
CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv32i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv32i8( %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i8(float*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8(,,, float*, , , i32) @@ -56470,60 +8515,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2f32_nxv2i8(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8( %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv16i32(float*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv16i32(,,, float*, , , i32) - -define @test_vloxseg3_nxv2f32_nxv16i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv16i32(float* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2f32_nxv16i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv16i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv16i32( %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i16(float*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16(,,, float*, , , i32) @@ -56540,168 +8547,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2f32_nxv2i16(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; 
CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16( %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv4i32(float*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv4i32(,,, float*, , , i32) - -define @test_vloxseg3_nxv2f32_nxv4i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv4i32(float* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2f32_nxv4i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv4i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv4i32( %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv16i16(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv16i16(,,,, float*, , , i32) - -define @test_vloxseg4_nxv2f32_nxv16i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv16i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f32_nxv16i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv16i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv16i16( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv1i8(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv1i8(,,,, float*, , , i32) - -define @test_vloxseg4_nxv2f32_nxv1i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv1i8: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv1i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f32_nxv1i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv1i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv1i8( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv16i8(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv16i8(,,,, float*, , , i32) - -define @test_vloxseg4_nxv2f32_nxv16i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv16i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f32_nxv16i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv16i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv16i8( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i32(float*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32(,,,, float*, , , i32) @@ -56718,386 +8579,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2f32_nxv2i32(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} 
@llvm.riscv.vloxseg4.nxv2f32.nxv4i16(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv4i16(,,,, float*, , , i32) - -define @test_vloxseg4_nxv2f32_nxv4i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv4i16(float* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2f32_nxv4i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv4i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv4i16( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv32i16(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv32i16(,,,, float*, , , i32) - -define @test_vloxseg4_nxv2f32_nxv32i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv32i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f32_nxv32i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv32i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv32i16( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv1i32(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv1i32(,,,, float*, , , i32) - -define @test_vloxseg4_nxv2f32_nxv1i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv1i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f32_nxv1i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv1i32: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv1i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv1i32( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv8i16(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv8i16(,,,, float*, , , i32) - -define @test_vloxseg4_nxv2f32_nxv8i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv8i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f32_nxv8i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv8i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv8i16( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv8i8(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv8i8(,,,, float*, , , i32) - -define @test_vloxseg4_nxv2f32_nxv8i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv8i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f32_nxv8i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv8i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv8i8( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv8i32(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv8i32(,,,, float*, , , i32) - -define @test_vloxseg4_nxv2f32_nxv8i32(float* %base, %index, i32 %vl) { -; 
CHECK-LABEL: test_vloxseg4_nxv2f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv8i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f32_nxv8i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv8i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv8i32( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv64i8(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv64i8(,,,, float*, , , i32) - -define @test_vloxseg4_nxv2f32_nxv64i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv64i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f32_nxv64i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv64i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv64i8( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv4i8(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv4i8(,,,, float*, , , i32) - -define @test_vloxseg4_nxv2f32_nxv4i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv4i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f32_nxv4i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail 
call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv4i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv4i8( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv1i16(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv1i16(,,,, float*, , , i32) - -define @test_vloxseg4_nxv2f32_nxv1i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv1i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f32_nxv1i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv1i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv1i16( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv32i8(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv32i8(,,,, float*, , , i32) - -define @test_vloxseg4_nxv2f32_nxv32i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv32i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f32_nxv32i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv32i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv32i8( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i8(float*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8(,,,, float*, , , i32) @@ -57114,62 +8612,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2f32_nxv2i8(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v 
v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv16i32(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv16i32(,,,, float*, , , i32) - -define @test_vloxseg4_nxv2f32_nxv16i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv16i32(float* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2f32_nxv16i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv16i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv16i32( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i16(float*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16(,,,, float*, , , i32) @@ -57186,173 +8645,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2f32_nxv2i16(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv4i32(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv4i32(,,,, float*, , , i32) - -define @test_vloxseg4_nxv2f32_nxv4i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: 
vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv4i32(float* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2f32_nxv4i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv4i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv4i32( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv16i16(float*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv16i16(,,,,, float*, , , i32) - -define @test_vloxseg5_nxv2f32_nxv16i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv16i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f32_nxv16i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv16i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv16i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv1i8(float*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv1i8(,,,,, float*, , , i32) - -define @test_vloxseg5_nxv2f32_nxv1i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv1i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f32_nxv1i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 
-; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv1i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv1i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv16i8(float*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv16i8(,,,,, float*, , , i32) - -define @test_vloxseg5_nxv2f32_nxv16i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv16i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv2f32_nxv16i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv16i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv16i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i32(float*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32(,,,,, float*, , , i32) @@ -57369,397 +8678,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv2f32_nxv2i32(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg5_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv4i16(float*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv4i16(,,,,, float*, , , i32) - -define @test_vloxseg5_nxv2f32_nxv4i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv4i16(float* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) 
   %1 = extractvalue {,,,,} %0, 1
   ret %1
 }
 
-define @test_vloxseg5_mask_nxv2f32_nxv4i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv4i16(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv4i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv32i16(float*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv32i16(,,,,, float*, , , i32)
-
-define @test_vloxseg5_nxv2f32_nxv32i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv32i16(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg5_mask_nxv2f32_nxv32i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv32i16(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv32i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv1i32(float*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv1i32(,,,,, float*, , , i32)
-
-define @test_vloxseg5_nxv2f32_nxv1i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv1i32(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg5_mask_nxv2f32_nxv1i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv1i32(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv1i32( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv8i16(float*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv8i16(,,,,, float*, , , i32)
-
-define @test_vloxseg5_nxv2f32_nxv8i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv8i16(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg5_mask_nxv2f32_nxv8i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv8i16(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv8i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv8i8(float*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv8i8(,,,,, float*, , , i32)
-
-define @test_vloxseg5_nxv2f32_nxv8i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv8i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg5_mask_nxv2f32_nxv8i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv8i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv8i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv8i32(float*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv8i32(,,,,, float*, , , i32)
-
-define @test_vloxseg5_nxv2f32_nxv8i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv8i32(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg5_mask_nxv2f32_nxv8i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv8i32(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv8i32( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv64i8(float*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv64i8(,,,,, float*, , , i32)
-
-define @test_vloxseg5_nxv2f32_nxv64i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv64i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg5_mask_nxv2f32_nxv64i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv64i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv64i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv4i8(float*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv4i8(,,,,, float*, , , i32)
-
-define @test_vloxseg5_nxv2f32_nxv4i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv4i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg5_mask_nxv2f32_nxv4i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv4i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv4i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv1i16(float*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv1i16(,,,,, float*, , , i32)
-
-define @test_vloxseg5_nxv2f32_nxv1i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv1i16(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg5_mask_nxv2f32_nxv1i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv1i16(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv1i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv32i8(float*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv32i8(,,,,, float*, , , i32)
-
-define @test_vloxseg5_nxv2f32_nxv32i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv32i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg5_mask_nxv2f32_nxv32i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv32i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv32i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
 declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i8(float*, , i32)
 declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8(,,,,, float*, , , i32)
 
@@ -57776,64 +8712,24 @@ entry:
   ret %1
 }
 
-define @test_vloxseg5_mask_nxv2f32_nxv2i8(float* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg5_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vmv1r.v v5, v1
 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv16i32(float*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv16i32(,,,,, float*, , , i32)
-
-define @test_vloxseg5_nxv2f32_nxv16i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv16i32(float* %base, %index, i32 %vl)
+  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
   %1 = extractvalue {,,,,} %0, 1
   ret %1
 }
 
-define @test_vloxseg5_mask_nxv2f32_nxv16i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv16i32(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv16i32( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
 declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i16(float*, , i32)
 declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16(,,,,, float*, , , i32)
 
@@ -57850,178 +8746,24 @@ entry:
   ret %1
 }
 
-define @test_vloxseg5_mask_nxv2f32_nxv2i16(float* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg5_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vmv1r.v v5, v1
 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i16(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv4i32(float*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv4i32(,,,,, float*, , , i32)
-
-define @test_vloxseg5_nxv2f32_nxv4i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv4i32(float* %base, %index, i32 %vl)
+  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
   %1 = extractvalue {,,,,} %0, 1
   ret %1
 }
 
-define @test_vloxseg5_mask_nxv2f32_nxv4i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv4i32(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,} %0, 0
-  %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f32.nxv4i32( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv16i16(float*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv16i16(,,,,,, float*, , , i32)
-
-define @test_vloxseg6_nxv2f32_nxv16i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv16i16(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg6_mask_nxv2f32_nxv16i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv16i16(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv16i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv1i8(float*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv1i8(,,,,,, float*, , , i32)
-
-define @test_vloxseg6_nxv2f32_nxv1i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv1i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg6_mask_nxv2f32_nxv1i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv1i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv1i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv16i8(float*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv16i8(,,,,,, float*, , , i32)
-
-define @test_vloxseg6_nxv2f32_nxv16i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv16i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg6_mask_nxv2f32_nxv16i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv16i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv16i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
 declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i32(float*, , i32)
 declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32(,,,,,, float*, , , i32)
 
@@ -58038,408 +8780,25 @@ entry:
   ret %1
 }
 
-define @test_vloxseg6_mask_nxv2f32_nxv2i32(float* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg6_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vmv1r.v v5, v1
 ; CHECK-NEXT: vmv1r.v v6, v1
 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i32(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv4i16(float*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv4i16(,,,,,, float*, , , i32)
-
-define @test_vloxseg6_nxv2f32_nxv4i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv4i16(float* %base, %index, i32 %vl)
+  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
   %1 = extractvalue {,,,,,} %0, 1
   ret %1
 }
 
-define @test_vloxseg6_mask_nxv2f32_nxv4i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv4i16(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv4i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv32i16(float*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv32i16(,,,,,, float*, , , i32)
-
-define @test_vloxseg6_nxv2f32_nxv32i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv32i16(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg6_mask_nxv2f32_nxv32i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv32i16(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv32i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv1i32(float*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv1i32(,,,,,, float*, , , i32)
-
-define @test_vloxseg6_nxv2f32_nxv1i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv1i32(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg6_mask_nxv2f32_nxv1i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv1i32(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv1i32( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv8i16(float*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv8i16(,,,,,, float*, , , i32)
-
-define @test_vloxseg6_nxv2f32_nxv8i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv8i16(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg6_mask_nxv2f32_nxv8i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv8i16(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv8i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv8i8(float*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv8i8(,,,,,, float*, , , i32)
-
-define @test_vloxseg6_nxv2f32_nxv8i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv8i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg6_mask_nxv2f32_nxv8i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv8i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv8i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv8i32(float*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv8i32(,,,,,, float*, , , i32)
-
-define @test_vloxseg6_nxv2f32_nxv8i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv8i32(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg6_mask_nxv2f32_nxv8i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv8i32(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv8i32( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv64i8(float*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv64i8(,,,,,, float*, , , i32)
-
-define @test_vloxseg6_nxv2f32_nxv64i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv64i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg6_mask_nxv2f32_nxv64i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv64i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv64i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv4i8(float*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv4i8(,,,,,, float*, , , i32)
-
-define @test_vloxseg6_nxv2f32_nxv4i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv4i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg6_mask_nxv2f32_nxv4i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv4i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv4i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv1i16(float*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv1i16(,,,,,, float*, , , i32)
-
-define @test_vloxseg6_nxv2f32_nxv1i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv1i16(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg6_mask_nxv2f32_nxv1i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv1i16(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv1i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv32i8(float*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv32i8(,,,,,, float*, , , i32)
-
-define @test_vloxseg6_nxv2f32_nxv32i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv32i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg6_mask_nxv2f32_nxv32i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv32i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv32i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
 declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i8(float*, , i32)
 declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8(,,,,,, float*, , , i32)
 
@@ -58456,66 +8815,25 @@ entry:
   ret %1
 }
 
-define @test_vloxseg6_mask_nxv2f32_nxv2i8(float* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg6_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vmv1r.v v5, v1
 ; CHECK-NEXT: vmv1r.v v6, v1
 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv16i32(float*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv16i32(,,,,,, float*, , , i32)
-
-define @test_vloxseg6_nxv2f32_nxv16i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv16i32(float* %base, %index, i32 %vl)
+  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
  %1 = extractvalue {,,,,,} %0, 1
   ret %1
 }
 
-define @test_vloxseg6_mask_nxv2f32_nxv16i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv16i32(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv16i32( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
 declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i16(float*, , i32)
 declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16(,,,,,, float*, , , i32)
 
@@ -58532,183 +8850,25 @@ entry:
   ret %1
 }
 
-define @test_vloxseg6_mask_nxv2f32_nxv2i16(float* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg6_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vmv1r.v v5, v1
 ; CHECK-NEXT: vmv1r.v v6, v1
 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i16(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv4i32(float*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv4i32(,,,,,, float*, , , i32)
-
-define @test_vloxseg6_nxv2f32_nxv4i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv4i32(float* %base, %index, i32 %vl)
+  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
   %1 = extractvalue {,,,,,} %0, 1
   ret %1
 }
 
-define @test_vloxseg6_mask_nxv2f32_nxv4i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv4i32(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,} %0, 0
-  %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f32.nxv4i32( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv16i16(float*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv16i16(,,,,,,, float*, , , i32)
-
-define @test_vloxseg7_nxv2f32_nxv16i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv16i16(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg7_mask_nxv2f32_nxv16i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv16i16(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 0
-  %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv16i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv1i8(float*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv1i8(,,,,,,, float*, , , i32)
-
-define @test_vloxseg7_nxv2f32_nxv1i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv1i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg7_mask_nxv2f32_nxv1i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv1i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 0
-  %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv1i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv16i8(float*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv16i8(,,,,,,, float*, , , i32)
-
-define @test_vloxseg7_nxv2f32_nxv16i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv16i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg7_mask_nxv2f32_nxv16i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv16i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 0
-  %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv16i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,,} %2, 1
-  ret %3
-}
-
 declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i32(float*, , i32)
 declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32(,,,,,,, float*, , , i32)
 
@@ -58725,11 +8885,10 @@ entry:
   ret %1
 }
 
-define @test_vloxseg7_mask_nxv2f32_nxv2i32(float* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg7_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
@@ -58737,407 +8896,15 @@ define @test_vloxseg7_mask_nxv2f32_nxv2i32(float* %base,
 ; CHECK-NEXT: vmv1r.v v5, v1
 ; CHECK-NEXT: vmv1r.v v6, v1
 ; CHECK-NEXT: vmv1r.v v7, v1
 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i32(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 0
-  %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv4i16(float*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv4i16(,,,,,,, float*, , , i32)
-
-define @test_vloxseg7_nxv2f32_nxv4i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv4i16(float* %base, %index, i32 %vl)
+  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
   %1 = extractvalue {,,,,,,} %0, 1
   ret %1
 }
 
-define @test_vloxseg7_mask_nxv2f32_nxv4i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv4i16(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 0
-  %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv4i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv32i16(float*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv32i16(,,,,,,, float*, , , i32)
-
-define @test_vloxseg7_nxv2f32_nxv32i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv32i16(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg7_mask_nxv2f32_nxv32i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv32i16(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 0
-  %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv32i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv1i32(float*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv1i32(,,,,,,, float*, , , i32)
-
-define @test_vloxseg7_nxv2f32_nxv1i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv1i32(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg7_mask_nxv2f32_nxv1i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv1i32(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 0
-  %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv1i32( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv8i16(float*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv8i16(,,,,,,, float*, , , i32)
-
-define @test_vloxseg7_nxv2f32_nxv8i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv8i16(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg7_mask_nxv2f32_nxv8i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv8i16(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 0
-  %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv8i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv8i8(float*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv8i8(,,,,,,, float*, , , i32)
-
-define @test_vloxseg7_nxv2f32_nxv8i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv8i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg7_mask_nxv2f32_nxv8i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv8i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 0
-  %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv8i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv8i32(float*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv8i32(,,,,,,, float*, , , i32)
-
-define @test_vloxseg7_nxv2f32_nxv8i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv8i32(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg7_mask_nxv2f32_nxv8i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv8i32(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 0
-  %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv8i32( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv64i8(float*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv64i8(,,,,,,, float*, , , i32)
-
-define @test_vloxseg7_nxv2f32_nxv64i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv64i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg7_mask_nxv2f32_nxv64i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv64i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 0
-  %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv64i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv4i8(float*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv4i8(,,,,,,, float*, , , i32)
-
-define @test_vloxseg7_nxv2f32_nxv4i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv4i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg7_mask_nxv2f32_nxv4i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv4i8(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 0
-  %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv4i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv1i16(float*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv1i16(,,,,,,, float*, , , i32)
-
-define @test_vloxseg7_nxv2f32_nxv1i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv1i16(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 1
-  ret %1
-}
-
-define @test_vloxseg7_mask_nxv2f32_nxv1i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
-  %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv1i16(float* %base, %index, i32 %vl)
-  %1 = extractvalue {,,,,,,} %0, 0
-  %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv1i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
-  %3 = extractvalue {,,,,,,} %2, 1
-  ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv32i8(float*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv32i8(,,,,,,, float*, , , i32)
-
-define @test_vloxseg7_nxv2f32_nxv32i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv32i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg7_mask_nxv2f32_nxv32i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv32i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 0
- %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv32i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,} %2, 1
- ret %3
-}
-
 declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i8(float*, , i32)
 declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8(,,,,,,, float*, , , i32)
@@ -59154,11 +8921,10 @@ entry:
 ret %1
 }

-define @test_vloxseg7_mask_nxv2f32_nxv2i8(float* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg7_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
@@ -59166,56 +8932,15 @@ define @test_vloxseg7_mask_nxv2f32_nxv2i8(float* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 0
- %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv16i32(float*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv16i32(,,,,,,, float*, , , i32)
-
-define @test_vloxseg7_nxv2f32_nxv16i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv16i32(float* %base, %index, i32 %vl)
+ %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,,,,} %0, 1
 ret %1
 }

-define @test_vloxseg7_mask_nxv2f32_nxv16i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv16i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 0
- %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv16i32( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,} %2, 1
- ret %3
-}
-
 declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i16(float*, , i32)
 declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16(,,,,,,, float*, , , i32)
@@ -59232,11 +8957,10 @@ entry:
 ret %1
 }

-define @test_vloxseg7_mask_nxv2f32_nxv2i16(float* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg7_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
@@ -59244,176 +8968,15 @@ define @test_vloxseg7_mask_nxv2f32_nxv2i16(float* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 0
- %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv4i32(float*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv4i32(,,,,,,, float*, , , i32)
-
-define @test_vloxseg7_nxv2f32_nxv4i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv4i32(float* %base, %index, i32 %vl)
+ %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,,,,} %0, 1
 ret %1
 }

-define @test_vloxseg7_mask_nxv2f32_nxv4i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
-; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv4i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 0
- %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f32.nxv4i32( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv16i16(float*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv16i16(,,,,,,,, float*, , , i32)
-
-define @test_vloxseg8_nxv2f32_nxv16i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv16i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv2f32_nxv16i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv16i16:
-; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv16i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv1i8(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv1i8(,,,,,,,, float*, , , i32) - -define @test_vloxseg8_nxv2f32_nxv1i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv1i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f32_nxv1i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv1i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv16i8(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv16i8(,,,,,,,, float*, , , i32) - -define @test_vloxseg8_nxv2f32_nxv16i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv16i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f32_nxv16i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret 
-entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv16i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i32(float*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32(,,,,,,,, float*, , , i32) @@ -59430,171 +8993,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2f32_nxv2i32(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv2f32_nxv2i32( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv4i16(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv4i16(,,,,,,,, float*, , , i32) - -define @test_vloxseg8_nxv2f32_nxv4i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv4i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f32_nxv4i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv4i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv32i16(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv32i16(,,,,,,,, float*, , , i32) - -define @test_vloxseg8_nxv2f32_nxv32i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = 
tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv32i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f32_nxv32i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv32i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv1i32(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv1i32(,,,,,,,, float*, , , i32) - -define @test_vloxseg8_nxv2f32_nxv1i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv1i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f32_nxv1i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv1i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv8i16(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv8i16(,,,,,,,, float*, , , i32) - -define @test_vloxseg8_nxv2f32_nxv8i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv8i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f32_nxv8i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: 
vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -59603,257 +9005,15 @@ define @test_vloxseg8_mask_nxv2f32_nxv8i16(float* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv8i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv8i8(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv8i8(,,,,,,,, float*, , , i32) - -define @test_vloxseg8_nxv2f32_nxv8i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv8i8(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv2f32_nxv8i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv8i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv8i32(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv8i32(,,,,,,,, float*, , , i32) - -define @test_vloxseg8_nxv2f32_nxv8i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv8i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f32_nxv8i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv8i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv8i32( %1, %1, %1, %1, %1, 
%1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv64i8(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv64i8(,,,,,,,, float*, , , i32) - -define @test_vloxseg8_nxv2f32_nxv64i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv64i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f32_nxv64i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv64i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv4i8(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv4i8(,,,,,,,, float*, , , i32) - -define @test_vloxseg8_nxv2f32_nxv4i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv4i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f32_nxv4i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv4i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv1i16(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv1i16(,,,,,,,, float*, , , i32) - -define @test_vloxseg8_nxv2f32_nxv1i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, 
(a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv1i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f32_nxv1i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv1i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv32i8(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv32i8(,,,,,,,, float*, , , i32) - -define @test_vloxseg8_nxv2f32_nxv32i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv32i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f32_nxv32i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv32i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i8(float*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8(,,,,,,,, float*, , , i32) @@ -59870,70 +9030,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2f32_nxv2i8(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv2f32_nxv2i8( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: 
vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv16i32(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv16i32(,,,,,,,, float*, , , i32) - -define @test_vloxseg8_nxv2f32_nxv16i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv16i32(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv2f32_nxv16i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv16i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i16(float*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16(,,,,,,,, float*, , , i32) @@ -59950,51 +9067,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2f32_nxv2i16(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv2f32_nxv2i16( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, 
%mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv4i32(float*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv4i32(,,,,,,,, float*, , , i32)
-
-define @test_vloxseg8_nxv2f32_nxv4i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv4i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv2f32_nxv4i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v10, v8
 ; CHECK-NEXT: vmv1r.v v11, v10
 ; CHECK-NEXT: vmv1r.v v12, v10
 ; CHECK-NEXT: vmv1r.v v13, v10
@@ -60003,49 +9079,13 @@ define @test_vloxseg8_mask_nxv2f32_nxv4i32(float* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv4i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv16i16(half*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv16i16(,, half*, , , i32)
-
-define @test_vloxseg2_nxv1f16_nxv16i16(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv16i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv1f16_nxv16i16(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv16i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv16i16( %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
+ %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 1
+ ret %1
 }

 declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i8(half*, , i32)
 declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8(,, half*, , , i32)
@@ -60064,160 +9104,20 @@ entry:
 ret %1
 }

-define @test_vloxseg2_mask_nxv1f16_nxv1i8(half* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg2_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v7, v8
 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8( %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv16i8(half*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv16i8(,, half*, , , i32)
-
-define @test_vloxseg2_nxv1f16_nxv16i8(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv16i8(half* %base, %index, i32 %vl)
+ %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8( %val, %val, half* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,} %0, 1
 ret %1
 }

-define @test_vloxseg2_mask_nxv1f16_nxv16i8(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv16i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv16i8( %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv2i32(half*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv2i32(,, half*, , , i32)
-
-define @test_vloxseg2_nxv1f16_nxv2i32(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv2i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv1f16_nxv2i32(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv2i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv2i32( %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv4i16(half*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv4i16(,, half*, , , i32)
-
-define @test_vloxseg2_nxv1f16_nxv4i16(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv4i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret
%1 -} - -define @test_vloxseg2_mask_nxv1f16_nxv4i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv4i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv4i16( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv32i16(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv32i16(,, half*, , , i32) - -define @test_vloxseg2_nxv1f16_nxv32i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f16_nxv32i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv32i16( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i32(half*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32(,, half*, , , i32) @@ -60234,194 +9134,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1f16_nxv1i32(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv8i16(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv8i16(,, half*, , , i32) - -define @test_vloxseg2_nxv1f16_nxv8i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv1f16.nxv8i16(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32( %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv1f16_nxv8i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv8i16( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv8i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv8i8(,, half*, , , i32) - -define @test_vloxseg2_nxv1f16_nxv8i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f16_nxv8i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv8i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv8i32(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv8i32(,, half*, , , i32) - -define @test_vloxseg2_nxv1f16_nxv8i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f16_nxv8i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv8i32( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv64i8(half*, , i32) -declare {,} 
@llvm.riscv.vloxseg2.mask.nxv1f16.nxv64i8(,, half*, , , i32) - -define @test_vloxseg2_nxv1f16_nxv64i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f16_nxv64i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv64i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv4i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv4i8(,, half*, , , i32) - -define @test_vloxseg2_nxv1f16_nxv4i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f16_nxv4i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv4i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i16(half*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16(,, half*, , , i32) @@ -60438,229 +9164,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv1f16_nxv1i16(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} 
@llvm.riscv.vloxseg2.nxv1f16.nxv32i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv32i8(,, half*, , , i32) - -define @test_vloxseg2_nxv1f16_nxv32i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv32i8(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16( %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv1f16_nxv32i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv32i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv2i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv2i8(,, half*, , , i32) - -define @test_vloxseg2_nxv1f16_nxv2i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f16_nxv2i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv2i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv16i32(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv16i32(,, half*, , , i32) - -define @test_vloxseg2_nxv1f16_nxv16i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f16_nxv16i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t 
-; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv16i32( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv2i16(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv2i16(,, half*, , , i32) - -define @test_vloxseg2_nxv1f16_nxv2i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f16_nxv2i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv2i16( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv1f16.nxv4i32(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv4i32(,, half*, , , i32) - -define @test_vloxseg2_nxv1f16_nxv4i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv4i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv1f16_nxv4i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv1f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f16.nxv4i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv4i32( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv16i16(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv16i16(,,, half*, , , i32) - -define @test_vloxseg3_nxv1f16_nxv16i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f16_nxv16i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg3_mask_nxv1f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv16i16( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i8(half*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8(,,, half*, , , i32) @@ -60677,165 +9194,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv1f16_nxv1i8(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv16i8(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv16i8(,,, half*, , , i32) - -define @test_vloxseg3_nxv1f16_nxv16i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv16i8(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv1f16_nxv16i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv16i8( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv2i32(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv2i32(,,, half*, , , i32) - -define @test_vloxseg3_nxv1f16_nxv2i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; 
CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f16_nxv2i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv2i32( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv4i16(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv4i16(,,, half*, , , i32) - -define @test_vloxseg3_nxv1f16_nxv4i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv4i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f16_nxv4i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv4i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv4i16( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv32i16(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv32i16(,,, half*, , , i32) - -define @test_vloxseg3_nxv1f16_nxv32i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f16_nxv32i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv32i16( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} 
@llvm.riscv.vloxseg3.nxv1f16.nxv1i32(half*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32(,,, half*, , , i32) @@ -60852,200 +9226,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv1f16_nxv1i32(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv8i16(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv8i16(,,, half*, , , i32) - -define @test_vloxseg3_nxv1f16_nxv8i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv8i16(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv1f16_nxv8i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv8i16( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv8i8(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv8i8(,,, half*, , , i32) - -define @test_vloxseg3_nxv1f16_nxv8i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f16_nxv8i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} 
@llvm.riscv.vloxseg3.nxv1f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv8i8( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv8i32(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv8i32(,,, half*, , , i32) - -define @test_vloxseg3_nxv1f16_nxv8i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f16_nxv8i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv8i32( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv64i8(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv64i8(,,, half*, , , i32) - -define @test_vloxseg3_nxv1f16_nxv64i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f16_nxv64i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv64i8( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv4i8(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv4i8(,,, half*, , , i32) - -define @test_vloxseg3_nxv1f16_nxv4i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f16_nxv4i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg3_mask_nxv1f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv4i8( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i16(half*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16(,,, half*, , , i32) @@ -61062,236 +9258,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv1f16_nxv1i16(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv32i8(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv32i8(,,, half*, , , i32) - -define @test_vloxseg3_nxv1f16_nxv32i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv32i8(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv1f16_nxv32i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv32i8( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv2i8(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv2i8(,,, half*, , , i32) - -define @test_vloxseg3_nxv1f16_nxv2i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; 
CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f16_nxv2i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv2i8( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv16i32(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv16i32(,,, half*, , , i32) - -define @test_vloxseg3_nxv1f16_nxv16i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f16_nxv16i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv16i32( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv2i16(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv2i16(,,, half*, , , i32) - -define @test_vloxseg3_nxv1f16_nxv2i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f16_nxv2i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv2i16( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} 
@llvm.riscv.vloxseg3.nxv1f16.nxv4i32(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv4i32(,,, half*, , , i32) - -define @test_vloxseg3_nxv1f16_nxv4i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv4i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f16_nxv4i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f16.nxv4i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv4i32( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv16i16(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv16i16(,,,, half*, , , i32) - -define @test_vloxseg4_nxv1f16_nxv16i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f16_nxv16i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv16i16( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i8(half*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8(,,,, half*, , , i32) @@ -61308,170 +9290,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1f16_nxv1i8(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i8(half* 
%base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv16i8(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv16i8(,,,, half*, , , i32) - -define @test_vloxseg4_nxv1f16_nxv16i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv16i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1f16_nxv16i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv16i8( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv2i32(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv2i32(,,,, half*, , , i32) - -define @test_vloxseg4_nxv1f16_nxv2i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f16_nxv2i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv2i32( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv4i16(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv4i16(,,,, half*, , , i32) - -define @test_vloxseg4_nxv1f16_nxv4i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv4i16(half* %base, 
%index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f16_nxv4i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv4i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv4i16( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv32i16(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv32i16(,,,, half*, , , i32) - -define @test_vloxseg4_nxv1f16_nxv32i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f16_nxv32i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv32i16( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i32(half*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32(,,,, half*, , , i32) @@ -61488,206 +9323,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1f16_nxv1i32(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv8i16(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv8i16(,,,, half*, , , i32) - -define @test_vloxseg4_nxv1f16_nxv8i16(half* %base, %index, i32 %vl) { -; 
CHECK-LABEL: test_vloxseg4_nxv1f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv8i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1f16_nxv8i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv8i16( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv8i8(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv8i8(,,,, half*, , , i32) - -define @test_vloxseg4_nxv1f16_nxv8i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f16_nxv8i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv8i8( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv8i32(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv8i32(,,,, half*, , , i32) - -define @test_vloxseg4_nxv1f16_nxv8i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f16_nxv8i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; 
CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv8i32( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv64i8(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv64i8(,,,, half*, , , i32) - -define @test_vloxseg4_nxv1f16_nxv64i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f16_nxv64i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv64i8( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv4i8(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv4i8(,,,, half*, , , i32) - -define @test_vloxseg4_nxv1f16_nxv4i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f16_nxv4i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv4i8( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i16(half*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16(,,,, half*, , , i32) @@ -61704,243 +9356,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1f16_nxv1i16(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, 
e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv32i8(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv32i8(,,,, half*, , , i32) - -define @test_vloxseg4_nxv1f16_nxv32i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv32i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1f16_nxv32i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv32i8( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv2i8(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv2i8(,,,, half*, , , i32) - -define @test_vloxseg4_nxv1f16_nxv2i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f16_nxv2i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv2i8( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv16i32(half*, , i32) -declare {,,,} 
@llvm.riscv.vloxseg4.mask.nxv1f16.nxv16i32(,,,, half*, , , i32) - -define @test_vloxseg4_nxv1f16_nxv16i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f16_nxv16i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv16i32( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv2i16(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv2i16(,,,, half*, , , i32) - -define @test_vloxseg4_nxv1f16_nxv2i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f16_nxv2i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv2i16( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv4i32(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv4i32(,,,, half*, , , i32) - -define @test_vloxseg4_nxv1f16_nxv4i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv4i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f16_nxv4i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; 
CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f16.nxv4i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f16.nxv4i32( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv16i16(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv16i16(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv1f16_nxv16i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f16_nxv16i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv16i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i8(half*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8(,,,,, half*, , , i32) @@ -61957,175 +9389,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1f16_nxv1i8(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg5_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv16i8(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv16i8(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv1f16_nxv16i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv16i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,,} 
@llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1f16_nxv16i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv16i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv2i32(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv2i32(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv1f16_nxv2i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f16_nxv2i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv2i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv4i16(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv4i16(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv1f16_nxv4i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv4i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f16_nxv4i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv4i16(half* %base, %index, i32 %vl) - %1 
= extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv4i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv32i16(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv32i16(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv1f16_nxv32i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f16_nxv32i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv32i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i32(half*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32(,,,,, half*, , , i32) @@ -62142,212 +9423,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1f16_nxv1i32(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg5_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv8i16(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv8i16(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv1f16_nxv8i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv8i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1f16_nxv8i16(half* %base, %index, i32 %vl, %mask) { 
-; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv8i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv8i8(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv8i8(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv1f16_nxv8i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f16_nxv8i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv8i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv8i32(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv8i32(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv1f16_nxv8i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f16_nxv8i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv8i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} 
@llvm.riscv.vloxseg5.nxv1f16.nxv64i8(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv64i8(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv1f16_nxv64i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f16_nxv64i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv64i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv4i8(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv4i8(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv1f16_nxv4i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f16_nxv4i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv4i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i16(half*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16(,,,,, half*, , , i32) @@ -62364,250 +9457,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1f16_nxv1i16(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg5_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei16.v 
v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv32i8(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv32i8(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv1f16_nxv32i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv32i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1f16_nxv32i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv32i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv2i8(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv2i8(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv1f16_nxv2i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f16_nxv2i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv2i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv16i32(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv16i32(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv1f16_nxv16i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv16i32: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f16_nxv16i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv16i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv2i16(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv2i16(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv1f16_nxv2i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f16_nxv2i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv2i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv4i32(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv4i32(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv1f16_nxv4i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv4i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f16_nxv4i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, 
v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f16.nxv4i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f16.nxv4i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv16i16(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv16i16(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv1f16_nxv16i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f16_nxv16i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv16i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i8(half*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8(,,,,,, half*, , , i32) @@ -62624,180 +9491,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1f16_nxv1i8(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv16i8(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv16i8(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv1f16_nxv16i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv16i8(half* %base, %index, i32 %vl) + %0 = tail call 
{,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1f16_nxv16i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv16i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv2i32(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv2i32(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv1f16_nxv2i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f16_nxv2i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv2i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv4i16(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv4i16(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv1f16_nxv4i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv4i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f16_nxv4i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: 
vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv4i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv4i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv32i16(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv32i16(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv1f16_nxv32i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f16_nxv32i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv32i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i32(half*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32(,,,,,, half*, , , i32) @@ -62814,218 +9526,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1f16_nxv1i32(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv8i16(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv8i16(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv1f16_nxv8i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv8i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1f16_nxv8i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv8i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv8i8(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv8i8(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv1f16_nxv8i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f16_nxv8i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv8i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv8i32(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv8i32(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv1f16_nxv8i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f16_nxv8i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; 
CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv8i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv64i8(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv64i8(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv1f16_nxv64i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f16_nxv64i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv64i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv4i8(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv4i8(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv1f16_nxv4i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f16_nxv4i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv4i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i16(half*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16(,,,,,, half*, , , i32) @@ -63042,257 +9561,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1f16_nxv1i16(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl, %mask) { ; 
CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv1i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv32i8(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv32i8(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv1f16_nxv32i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv32i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1f16_nxv32i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv32i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv2i8(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv2i8(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv1f16_nxv2i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f16_nxv2i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call 
{,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv2i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv16i32(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv16i32(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv1f16_nxv16i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f16_nxv16i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv16i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv2i16(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv2i16(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv1f16_nxv2i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f16_nxv2i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv2i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv4i32(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv4i32(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv1f16_nxv4i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv4i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv1f16_nxv4i32(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f16.nxv4i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f16.nxv4i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv16i16(half*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv16i16(,,,,,,, half*, , , i32)
-
-define @test_vloxseg7_nxv1f16_nxv16i16(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv16i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg7_mask_nxv1f16_nxv16i16(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv16i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 0
- %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,} %2, 1
- ret %3
-}
-
 declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i8(half*, , i32)
 declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8(,,,,,,, half*, , , i32)
@@ -63309,11 +9596,10 @@ entry:
 ret %1
 }
-define @test_vloxseg7_mask_nxv1f16_nxv1i8(half* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg7_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
@@ -63321,173 +9607,15 @@ define @test_vloxseg7_mask_nxv1f16_nxv1i8(half* %base,
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 0
- %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,} %2, 1
- ret %3
-}
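; EDITORIAL NOTE: in this rendering of the patch the angle-bracketed scalable
; vector types (e.g. <vscale x 1 x half>) have been stripped out of the IR,
; which is why tuple types appear as "{,,,,,,}" and arguments as bare "%val".
; As a hedged reconstruction only, the new-style masked test above has the
; shape sketched below (CHECK lines omitted), with types inferred from the
; intrinsic's mangled name: nxv1f16 = <vscale x 1 x half>,
; nxv1i8 = <vscale x 1 x i8>, and an i1 mask = <vscale x 1 x i1>. The pattern
; of the whole patch, visible in every hunk: the masked test now receives the
; tied destination tuple as an explicit %val argument instead of materializing
; it with a preceding unmasked vloxseg load, so only the tail-undisturbed (tu)
; masked load remains, and the redundant large-index-type variants are deleted.
;
; define <vscale x 1 x half> @test_vloxseg7_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; entry:
;   ; all seven tied segment operands take %val; segment 1 is returned
;   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl)
;   %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
;   ret <vscale x 1 x half> %1
; }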
- -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv16i8(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv16i8(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv1f16_nxv16i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv16i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1f16_nxv16i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv2i32(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv2i32(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv1f16_nxv2i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f16_nxv2i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv4i16(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv4i16(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv1f16_nxv4i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = 
tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv4i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f16_nxv4i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv4i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv32i16(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv32i16(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv1f16_nxv32i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f16_nxv32i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i32(half*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32(,,,,,,, half*, , , i32) @@ -63504,11 +9632,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1f16_nxv1i32(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -63516,212 +9643,15 @@ define @test_vloxseg7_mask_nxv1f16_nxv1i32(half* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare 
{,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv8i16(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv8i16(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv1f16_nxv8i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv8i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1f16_nxv8i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv8i8(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv8i8(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv1f16_nxv8i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f16_nxv8i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv8i32(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv8i32(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv1f16_nxv8i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.nxv1f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f16_nxv8i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv64i8(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv64i8(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv1f16_nxv64i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f16_nxv64i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv4i8(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv4i8(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv1f16_nxv4i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f16_nxv4i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu -; 
CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv4i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 0
- %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,} %2, 1
- ret %3
-}
-
declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i16(half*, , i32)
declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16(,,,,,,, half*, , , i32)
@@ -63738,11 +9668,10 @@ entry:
ret %1
}

-define @test_vloxseg7_mask_nxv1f16_nxv1i16(half* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg7_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
; CHECK-NEXT: vmv1r.v v2, v1
; CHECK-NEXT: vmv1r.v v3, v1
; CHECK-NEXT: vmv1r.v v4, v1
@@ -63750,252 +9679,15 @@ define @test_vloxseg7_mask_nxv1f16_nxv1i16(half* %base,
,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv1i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 0
- %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv32i8(half*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv32i8(,,,,,,, half*, , , i32)
-
-define @test_vloxseg7_nxv1f16_nxv32i8(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv32i8(half* %base, %index, i32 %vl)
+ %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,,,,} %0, 1
 ret %1
}

-define @test_vloxseg7_mask_nxv1f16_nxv32i8(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv32i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 0
- %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv2i8(half*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv2i8(,,,,,,, half*, , , i32)
-
-define @test_vloxseg7_nxv1f16_nxv2i8(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv2i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg7_mask_nxv1f16_nxv2i8(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv2i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 0
- %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv16i32(half*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv16i32(,,,,,,, half*, , , i32)
-
-define @test_vloxseg7_nxv1f16_nxv16i32(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv16i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg7_mask_nxv1f16_nxv16i32(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv16i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 0
- %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv2i16(half*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv2i16(,,,,,,, half*, , , i32)
-
-define @test_vloxseg7_nxv1f16_nxv2i16(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv2i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg7_mask_nxv1f16_nxv2i16(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv2i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 0
- %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv4i32(half*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv4i32(,,,,,,, half*, , , i32)
-
-define @test_vloxseg7_nxv1f16_nxv4i32(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv4i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg7_mask_nxv1f16_nxv4i32(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f16.nxv4i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 0
- %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv16i16(half*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv16i16(,,,,,,,, half*, , , i32)
-
-define @test_vloxseg8_nxv1f16_nxv16i16(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv16i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv1f16_nxv16i16(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv16i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i8(half*, , i32)
declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8(,,,,,,,, half*, , , i32)
@@ -64012,51 +9704,10 @@ entry:
ret %1
}

-define @test_vloxseg8_mask_nxv1f16_nxv1i8(half* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg8_mask_nxv1f16_nxv1i8( %val, half* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v11, v9
-; CHECK-NEXT: vmv1r.v v12, v9
-; CHECK-NEXT: vmv1r.v v13, v9
-; CHECK-NEXT: vmv1r.v v14, v9
-; CHECK-NEXT: vmv1r.v v15, v9
-; CHECK-NEXT: vmv1r.v v16, v9
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv16i8(half*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv16i8(,,,,,,,, half*, , , i32)
-
-define @test_vloxseg8_nxv1f16_nxv16i8(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv16i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv1f16_nxv16i8(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v10
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv1r.v v13, v10
@@ -64065,137 +9716,15 @@ define @test_vloxseg8_mask_nxv1f16_nxv16i8(half* %base,
,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv16i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv2i32(half*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv2i32(,,,,,,,, half*, , , i32)
-
-define @test_vloxseg8_nxv1f16_nxv2i32(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv2i32(half* %base, %index, i32 %vl)
+ %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,,,,,} %0, 1
 ret %1
}

-define @test_vloxseg8_mask_nxv1f16_nxv2i32(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v11, v9
-; CHECK-NEXT: vmv1r.v v12, v9
-; CHECK-NEXT: vmv1r.v v13, v9
-; CHECK-NEXT: vmv1r.v v14, v9
-; CHECK-NEXT: vmv1r.v v15, v9
-; CHECK-NEXT: vmv1r.v v16, v9
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv2i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv4i16(half*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv4i16(,,,,,,,, half*, , , i32)
-
-define @test_vloxseg8_nxv1f16_nxv4i16(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv4i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv1f16_nxv4i16(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v11, v9
-; CHECK-NEXT: vmv1r.v v12, v9
-; CHECK-NEXT: vmv1r.v v13, v9
-; CHECK-NEXT: vmv1r.v v14, v9
-; CHECK-NEXT: vmv1r.v v15, v9
-; CHECK-NEXT: vmv1r.v v16, v9
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv4i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv32i16(half*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv32i16(,,,,,,,, half*, , , i32)
-
-define @test_vloxseg8_nxv1f16_nxv32i16(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv32i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv1f16_nxv32i16(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v17
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv32i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i32(half*, , i32)
declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32(,,,,,,,, half*, , , i32)
@@ -64212,51 +9741,10 @@ entry:
ret %1
}

-define @test_vloxseg8_mask_nxv1f16_nxv1i32(half* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg8_mask_nxv1f16_nxv1i32( %val, half* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v11, v9
-; CHECK-NEXT: vmv1r.v v12, v9
-; CHECK-NEXT: vmv1r.v v13, v9
-; CHECK-NEXT: vmv1r.v v14, v9
-; CHECK-NEXT: vmv1r.v v15, v9
-; CHECK-NEXT: vmv1r.v v16, v9
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv8i16(half*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv8i16(,,,,,,,, half*, , , i32)
-
-define @test_vloxseg8_nxv1f16_nxv8i16(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv8i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv1f16_nxv8i16(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v10
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv1r.v v13, v10
@@ -64265,177 +9753,15 @@ define @test_vloxseg8_mask_nxv1f16_nxv8i16(half* %base,
,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv8i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv8i8(half*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv8i8(,,,,,,,, half*, , , i32)
-
-define @test_vloxseg8_nxv1f16_nxv8i8(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv8i8(half* %base, %index, i32 %vl)
+ %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,,,,,} %0, 1
 ret %1
}

-define @test_vloxseg8_mask_nxv1f16_nxv8i8(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v11, v9
-; CHECK-NEXT: vmv1r.v v12, v9
-; CHECK-NEXT: vmv1r.v v13, v9
-; CHECK-NEXT: vmv1r.v v14, v9
-; CHECK-NEXT: vmv1r.v v15, v9
-; CHECK-NEXT: vmv1r.v v16, v9
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv8i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv8i32(half*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv8i32(,,,,,,,, half*, , , i32)
-
-define @test_vloxseg8_nxv1f16_nxv8i32(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv8i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv1f16_nxv8i32(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv8i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv64i8(half*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv64i8(,,,,,,,, half*, , , i32)
-
-define @test_vloxseg8_nxv1f16_nxv64i8(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv64i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv1f16_nxv64i8(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v17
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv64i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv4i8(half*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv4i8(,,,,,,,, half*, , , i32)
-
-define @test_vloxseg8_nxv1f16_nxv4i8(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv4i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv1f16_nxv4i8(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v11, v9
-; CHECK-NEXT: vmv1r.v v12, v9
-; CHECK-NEXT: vmv1r.v v13, v9
-; CHECK-NEXT: vmv1r.v v14, v9
-; CHECK-NEXT: vmv1r.v v15, v9
-; CHECK-NEXT: vmv1r.v v16, v9
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv4i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i16(half*, , i32)
declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16(,,,,,,,, half*, , , i32)
@@ -64452,211 +9778,10 @@ entry:
ret %1
}

-define @test_vloxseg8_mask_nxv1f16_nxv1i16(half* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg8_mask_nxv1f16_nxv1i16( %val, half* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v11, v9
-; CHECK-NEXT: vmv1r.v v12, v9
-; CHECK-NEXT: vmv1r.v v13, v9
-; CHECK-NEXT: vmv1r.v v14, v9
-; CHECK-NEXT: vmv1r.v v15, v9
-; CHECK-NEXT: vmv1r.v v16, v9
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv1i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv32i8(half*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv32i8(,,,,,,,, half*, , , i32)
-
-define @test_vloxseg8_nxv1f16_nxv32i8(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv32i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv1f16_nxv32i8(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv32i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv2i8(half*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv2i8(,,,,,,,, half*, , , i32)
-
-define @test_vloxseg8_nxv1f16_nxv2i8(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv2i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv1f16_nxv2i8(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v11, v9
-; CHECK-NEXT: vmv1r.v v12, v9
-; CHECK-NEXT: vmv1r.v v13, v9
-; CHECK-NEXT: vmv1r.v v14, v9
-; CHECK-NEXT: vmv1r.v v15, v9
-; CHECK-NEXT: vmv1r.v v16, v9
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv2i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv16i32(half*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv16i32(,,,,,,,, half*, , , i32)
-
-define @test_vloxseg8_nxv1f16_nxv16i32(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv16i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv1f16_nxv16i32(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v17
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv16i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv2i16(half*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv2i16(,,,,,,,, half*, , , i32)
-
-define @test_vloxseg8_nxv1f16_nxv2i16(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv2i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv1f16_nxv2i16(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8
-; CHECK-NEXT: vmv1r.v v10, v9
-; CHECK-NEXT: vmv1r.v v11, v9
-; CHECK-NEXT: vmv1r.v v12, v9
-; CHECK-NEXT: vmv1r.v v13, v9
-; CHECK-NEXT: vmv1r.v v14, v9
-; CHECK-NEXT: vmv1r.v v15, v9
-; CHECK-NEXT: vmv1r.v v16, v9
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
-; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v10
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv2i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv4i32(half*, , i32)
-declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv4i32(,,,,,,,, half*, , , i32)
-
-define @test_vloxseg8_nxv1f16_nxv4i32(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv4i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg8_mask_nxv1f16_nxv4i32(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
-; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v10
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv1r.v v13, v10
@@ -64665,49 +9790,13 @@ define @test_vloxseg8_mask_nxv1f16_nxv4i32(half* %base,
,,,,,,,} @llvm.riscv.vloxseg8.nxv1f16.nxv4i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,,} %0, 0
- %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv16i16(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv16i16(,, float*, , , i32)
-
-define @test_vloxseg2_nxv1f32_nxv16i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv16i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv1f32_nxv16i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv16i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv16i16( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
+ %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
+ %1 = extractvalue {,,,,,,,} %0, 1
+ ret %1
}

declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i8(float*, , i32)
declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8(,, float*, , , i32)
@@ -64726,160 +9815,20 @@ entry:
ret %1
}

-define @test_vloxseg2_mask_nxv1f32_nxv1i8(float* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg2_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv16i8(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv16i8(,, float*, , , i32)
-
-define @test_vloxseg2_nxv1f32_nxv16i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv16i8(float* %base, %index, i32 %vl)
+ %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8( %val, %val, float* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,} %0, 1
 ret %1
}

-define @test_vloxseg2_mask_nxv1f32_nxv16i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv16i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv16i8( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv2i32(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv2i32(,, float*, , , i32)
-
-define @test_vloxseg2_nxv1f32_nxv2i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv2i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv1f32_nxv2i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv2i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv2i32( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv4i16(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv4i16(,, float*, , , i32)
-
-define @test_vloxseg2_nxv1f32_nxv4i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv4i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv1f32_nxv4i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv4i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv4i16( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv32i16(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv32i16(,, float*, , , i32)
-
-define @test_vloxseg2_nxv1f32_nxv32i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv32i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv1f32_nxv32i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv32i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv32i16( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i32(float*, , i32)
declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32(,, float*, , , i32)
@@ -64896,194 +9845,20 @@ entry:
ret %1
}

-define @test_vloxseg2_mask_nxv1f32_nxv1i32(float* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg2_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv8i16(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv8i16(,, float*, , , i32)
-
-define @test_vloxseg2_nxv1f32_nxv8i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv8i16(float* %base, %index, i32 %vl)
+ %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32( %val, %val, float* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,} %0, 1
 ret %1
}

-define @test_vloxseg2_mask_nxv1f32_nxv8i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv8i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv8i16( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv8i8(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv8i8(,, float*, , , i32)
-
-define @test_vloxseg2_nxv1f32_nxv8i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv8i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv1f32_nxv8i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv8i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv8i8( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv8i32(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv8i32(,, float*, , , i32)
-
-define @test_vloxseg2_nxv1f32_nxv8i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv8i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv1f32_nxv8i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv8i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv8i32( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv64i8(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv64i8(,, float*, , , i32)
-
-define @test_vloxseg2_nxv1f32_nxv64i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv64i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv1f32_nxv64i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv64i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv64i8( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv4i8(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv4i8(,, float*, , , i32)
-
-define @test_vloxseg2_nxv1f32_nxv4i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv4i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv1f32_nxv4i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv4i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv4i8( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i16(float*, , i32)
declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16(,, float*, , , i32)
@@ -65100,229 +9875,20 @@ entry:
ret %1
}

-define @test_vloxseg2_mask_nxv1f32_nxv1i16(float* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg2_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv1i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv32i8(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv32i8(,, float*, , , i32)
-
-define @test_vloxseg2_nxv1f32_nxv32i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv32i8(float* %base, %index, i32 %vl)
+ %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16( %val, %val, float* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,} %0, 1
 ret %1
}

-define @test_vloxseg2_mask_nxv1f32_nxv32i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv32i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv32i8( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv2i8(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv2i8(,, float*, , , i32)
-
-define @test_vloxseg2_nxv1f32_nxv2i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv2i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv1f32_nxv2i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv2i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv2i8( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv16i32(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv16i32(,, float*, , , i32)
-
-define @test_vloxseg2_nxv1f32_nxv16i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv16i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv1f32_nxv16i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv16i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv16i32( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv2i16(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv2i16(,, float*, , , i32)
-
-define @test_vloxseg2_nxv1f32_nxv2i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv2i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv1f32_nxv2i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv2i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv2i16( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv1f32.nxv4i32(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv4i32(,, float*, , , i32)
-
-define @test_vloxseg2_nxv1f32_nxv4i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv4i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv1f32_nxv4i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv1f32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f32.nxv4i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv4i32( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv16i16(float*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv16i16(,,, float*, , , i32)
-
-define @test_vloxseg3_nxv1f32_nxv16i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv16i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv1f32_nxv16i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv16i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv16i16( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i8(float*, , i32)
declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8(,,, float*, , , i32)
@@ -65339,165 +9905,22 @@ entry:
ret %1
}

-define @test_vloxseg3_mask_nxv1f32_nxv1i8(float* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg3_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
; CHECK-NEXT: vmv1r.v v2, v1
; CHECK-NEXT: vmv1r.v v3, v1
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v2
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv16i8(float*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv16i8(,,, float*, , , i32)
-
-define @test_vloxseg3_nxv1f32_nxv16i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv16i8(float* %base, %index, i32 %vl)
+ %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,} %0, 1
 ret %1
}

-define @test_vloxseg3_mask_nxv1f32_nxv16i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv16i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv16i8( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv2i32(float*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv2i32(,,, float*, , , i32)
-
-define @test_vloxseg3_nxv1f32_nxv2i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv2i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv1f32_nxv2i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv2i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv2i32( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv4i16(float*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv4i16(,,, float*, , , i32)
-
-define @test_vloxseg3_nxv1f32_nxv4i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv4i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv1f32_nxv4i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv4i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv4i16( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv32i16(float*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv32i16(,,, float*, , , i32)
-
-define @test_vloxseg3_nxv1f32_nxv32i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv32i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv1f32_nxv32i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv32i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv32i16( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i32(float*, , i32)
declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32(,,, float*, , , i32)
@@ -65514,200 +9937,22 @@ entry:
ret %1
}

-define @test_vloxseg3_mask_nxv1f32_nxv1i32(float* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg3_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
; CHECK-NEXT: vmv1r.v v2, v1
; CHECK-NEXT: vmv1r.v v3, v1
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v2
; CHECK-NEXT: ret
entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv8i16(float*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv8i16(,,, float*, , , i32)
-
-define @test_vloxseg3_nxv1f32_nxv8i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv8i16(float* %base, %index, i32 %vl)
+ %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,} %0, 1
 ret %1
}

-define @test_vloxseg3_mask_nxv1f32_nxv8i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv8i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv8i16( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv8i8(float*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv8i8(,,, float*, , , i32)
-
-define @test_vloxseg3_nxv1f32_nxv8i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv8i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv1f32_nxv8i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv8i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv8i8( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv8i32(float*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv8i32(,,, float*, , , i32)
-
-define @test_vloxseg3_nxv1f32_nxv8i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv8i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv1f32_nxv8i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv8i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv8i32( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv64i8(float*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv64i8(,,, float*, , , i32)
-
-define @test_vloxseg3_nxv1f32_nxv64i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv64i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv1f32_nxv64i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv64i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv64i8( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv4i8(float*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv4i8(,,, float*, , , i32)
-
-define @test_vloxseg3_nxv1f32_nxv4i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv4i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv1f32_nxv4i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv4i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv4i8( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i16(float*, , i32)
declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16(,,, float*, , , i32)
@@ -65724,236 +9969,22 @@ entry:
ret %1
}

-define @test_vloxseg3_mask_nxv1f32_nxv1i16(float* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg3_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl, %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
; CHECK-NEXT: vmv1r.v v2, v1
; CHECK-NEXT: vmv1r.v v3, v1
; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
-; CHECK-NEXT:
vloxseg3ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16( %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv32i8(float*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv32i8(,,, float*, , , i32) - -define @test_vloxseg3_nxv1f32_nxv32i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv32i8(float* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv1f32_nxv32i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv32i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv32i8( %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv2i8(float*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv2i8(,,, float*, , , i32) - -define @test_vloxseg3_nxv1f32_nxv2i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv2i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f32_nxv2i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv2i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv2i8( %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv16i32(float*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv16i32(,,, float*, , , i32) - -define @test_vloxseg3_nxv1f32_nxv16i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: 
vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv16i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f32_nxv16i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv16i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv16i32( %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv2i16(float*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv2i16(,,, float*, , , i32) - -define @test_vloxseg3_nxv1f32_nxv2i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv2i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f32_nxv2i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv2i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv2i16( %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv4i32(float*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv4i32(,,, float*, , , i32) - -define @test_vloxseg3_nxv1f32_nxv4i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv4i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv1f32_nxv4i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f32.nxv4i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv4i32( %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 
-} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv16i16(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv16i16(,,,, float*, , , i32) - -define @test_vloxseg4_nxv1f32_nxv16i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv16i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f32_nxv16i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv16i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv16i16( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i8(float*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8(,,,, float*, , , i32) @@ -65970,170 +10001,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1f32_nxv1i8(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv16i8(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv16i8(,,,, float*, , , i32) - -define @test_vloxseg4_nxv1f32_nxv16i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv16i8(float* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1f32_nxv16i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli 
a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv16i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv16i8( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv2i32(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv2i32(,,,, float*, , , i32) - -define @test_vloxseg4_nxv1f32_nxv2i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv2i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f32_nxv2i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv2i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv2i32( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv4i16(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv4i16(,,,, float*, , , i32) - -define @test_vloxseg4_nxv1f32_nxv4i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv4i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f32_nxv4i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv4i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv4i16( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv32i16(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv32i16(,,,, float*, , , i32) - -define @test_vloxseg4_nxv1f32_nxv32i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; 
CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv32i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f32_nxv32i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv32i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv32i16( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i32(float*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32(,,,, float*, , , i32) @@ -66150,206 +10034,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1f32_nxv1i32(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv8i16(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv8i16(,,,, float*, , , i32) - -define @test_vloxseg4_nxv1f32_nxv8i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv8i16(float* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1f32_nxv8i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv8i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv8i16( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 
-} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv8i8(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv8i8(,,,, float*, , , i32) - -define @test_vloxseg4_nxv1f32_nxv8i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv8i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f32_nxv8i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv8i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv8i8( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv8i32(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv8i32(,,,, float*, , , i32) - -define @test_vloxseg4_nxv1f32_nxv8i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv8i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f32_nxv8i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv8i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv8i32( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv64i8(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv64i8(,,,, float*, , , i32) - -define @test_vloxseg4_nxv1f32_nxv64i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv64i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f32_nxv64i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: 
vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv64i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv64i8( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv4i8(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv4i8(,,,, float*, , , i32) - -define @test_vloxseg4_nxv1f32_nxv4i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv4i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f32_nxv4i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv4i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv4i8( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i16(float*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16(,,,, float*, , , i32) @@ -66366,243 +10067,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv1f32_nxv1i16(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv32i8(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv32i8(,,,, float*, , , i32) - -define @test_vloxseg4_nxv1f32_nxv32i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv32i8(float* %base, %index, i32 %vl) + %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv1f32_nxv32i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv32i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv32i8( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv2i8(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv2i8(,,,, float*, , , i32) - -define @test_vloxseg4_nxv1f32_nxv2i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv2i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f32_nxv2i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv2i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv2i8( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv16i32(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv16i32(,,,, float*, , , i32) - -define @test_vloxseg4_nxv1f32_nxv16i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv16i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f32_nxv16i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv16i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv16i32( %1, %1, %1, %1, 
float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv2i16(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv2i16(,,,, float*, , , i32) - -define @test_vloxseg4_nxv1f32_nxv2i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv2i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f32_nxv2i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv2i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv2i16( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv4i32(float*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv4i32(,,,, float*, , , i32) - -define @test_vloxseg4_nxv1f32_nxv4i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv4i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv1f32_nxv4i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f32.nxv4i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv1f32.nxv4i32( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv16i16(float*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv16i16(,,,,, float*, , , i32) - -define @test_vloxseg5_nxv1f32_nxv16i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv16i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f32_nxv16i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv16i16: -; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv16i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv16i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i8(float*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8(,,,,, float*, , , i32) @@ -66619,175 +10100,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1f32_nxv1i8(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg5_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv16i8(float*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv16i8(,,,,, float*, , , i32) - -define @test_vloxseg5_nxv1f32_nxv16i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv16i8(float* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1f32_nxv16i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv16i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv16i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv2i32(float*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv2i32(,,,,, float*, , , i32) - -define @test_vloxseg5_nxv1f32_nxv2i32(float* %base, 
%index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv2i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f32_nxv2i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv2i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv2i32( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv4i16(float*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv4i16(,,,,, float*, , , i32) - -define @test_vloxseg5_nxv1f32_nxv4i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv4i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f32_nxv4i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv4i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv4i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv32i16(float*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv32i16(,,,,, float*, , , i32) - -define @test_vloxseg5_nxv1f32_nxv32i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv32i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f32_nxv32i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v 
v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv32i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv32i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i32(float*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32(,,,,, float*, , , i32) @@ -66804,212 +10134,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1f32_nxv1i32(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg5_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv8i16(float*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv8i16(,,,,, float*, , , i32) - -define @test_vloxseg5_nxv1f32_nxv8i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv8i16(float* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv1f32_nxv8i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv8i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv8i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv8i8(float*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv8i8(,,,,, float*, , , i32) - -define @test_vloxseg5_nxv1f32_nxv8i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: 
vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv8i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f32_nxv8i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv8i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv8i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv8i32(float*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv8i32(,,,,, float*, , , i32) - -define @test_vloxseg5_nxv1f32_nxv8i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv8i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f32_nxv8i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv8i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv8i32( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv64i8(float*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv64i8(,,,,, float*, , , i32) - -define @test_vloxseg5_nxv1f32_nxv64i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv64i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f32_nxv64i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} 
@llvm.riscv.vloxseg5.nxv1f32.nxv64i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv64i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv4i8(float*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv4i8(,,,,, float*, , , i32) - -define @test_vloxseg5_nxv1f32_nxv4i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv4i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f32_nxv4i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv4i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv4i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i16(float*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16(,,,,, float*, , , i32) @@ -67026,250 +10168,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv1f32_nxv1i16(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg5_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv32i8(float*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv32i8(,,,,, float*, , , i32) - -define @test_vloxseg5_nxv1f32_nxv32i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv32i8(float* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define 
@test_vloxseg5_mask_nxv1f32_nxv32i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv32i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv32i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv2i8(float*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv2i8(,,,,, float*, , , i32) - -define @test_vloxseg5_nxv1f32_nxv2i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv2i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f32_nxv2i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv2i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv2i8( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv16i32(float*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv16i32(,,,,, float*, , , i32) - -define @test_vloxseg5_nxv1f32_nxv16i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv16i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f32_nxv16i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv16i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv16i32( %1, %1, %1, %1, %1, float* %base, %index, %mask, 
i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv2i16(float*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv2i16(,,,,, float*, , , i32) - -define @test_vloxseg5_nxv1f32_nxv2i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv2i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f32_nxv2i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv2i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv2i16( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv4i32(float*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv4i32(,,,,, float*, , , i32) - -define @test_vloxseg5_nxv1f32_nxv4i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv4i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv1f32_nxv4i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f32.nxv4i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv1f32.nxv4i32( %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv16i16(float*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv16i16(,,,,,, float*, , , i32) - -define @test_vloxseg6_nxv1f32_nxv16i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv16i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f32_nxv16i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg6_mask_nxv1f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv16i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv16i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i8(float*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8(,,,,,, float*, , , i32) @@ -67286,180 +10202,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1f32_nxv1i8(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv16i8(float*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv16i8(,,,,,, float*, , , i32) - -define @test_vloxseg6_nxv1f32_nxv16i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv16i8(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1f32_nxv16i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv16i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv16i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} 
@llvm.riscv.vloxseg6.nxv1f32.nxv2i32(float*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv2i32(,,,,,, float*, , , i32) - -define @test_vloxseg6_nxv1f32_nxv2i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv2i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f32_nxv2i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv2i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv2i32( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv4i16(float*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv4i16(,,,,,, float*, , , i32) - -define @test_vloxseg6_nxv1f32_nxv4i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv4i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f32_nxv4i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv4i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv4i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv32i16(float*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv32i16(,,,,,, float*, , , i32) - -define @test_vloxseg6_nxv1f32_nxv32i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv32i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f32_nxv32i16(float* %base, %index, i32 %vl, %mask) { -; 
CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv32i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv32i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i32(float*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32(,,,,,, float*, , , i32) @@ -67476,218 +10237,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1f32_nxv1i32(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv8i16(float*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv8i16(,,,,,, float*, , , i32) - -define @test_vloxseg6_nxv1f32_nxv8i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv8i16(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1f32_nxv8i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv8i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv8i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - 
ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv8i8(float*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv8i8(,,,,,, float*, , , i32) - -define @test_vloxseg6_nxv1f32_nxv8i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv8i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f32_nxv8i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv8i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv8i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv8i32(float*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv8i32(,,,,,, float*, , , i32) - -define @test_vloxseg6_nxv1f32_nxv8i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv8i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f32_nxv8i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv8i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv8i32( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv64i8(float*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv64i8(,,,,,, float*, , , i32) - -define @test_vloxseg6_nxv1f32_nxv64i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv64i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f32_nxv64i8(float* %base, %index, i32 %vl, 
%mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv64i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv64i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv4i8(float*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv4i8(,,,,,, float*, , , i32) - -define @test_vloxseg6_nxv1f32_nxv4i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv4i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f32_nxv4i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv4i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv4i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i16(float*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16(,,,,,, float*, , , i32) @@ -67704,257 +10272,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv1f32_nxv1i16(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv32i8(float*, , i32) -declare {,,,,,} 
@llvm.riscv.vloxseg6.mask.nxv1f32.nxv32i8(,,,,,, float*, , , i32) - -define @test_vloxseg6_nxv1f32_nxv32i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv32i8(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv1f32_nxv32i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv32i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv32i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv2i8(float*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv2i8(,,,,,, float*, , , i32) - -define @test_vloxseg6_nxv1f32_nxv2i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv2i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f32_nxv2i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv2i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv2i8( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv16i32(float*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv16i32(,,,,,, float*, , , i32) - -define @test_vloxseg6_nxv1f32_nxv16i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv16i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define 
@test_vloxseg6_mask_nxv1f32_nxv16i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv16i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv16i32( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv2i16(float*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv2i16(,,,,,, float*, , , i32) - -define @test_vloxseg6_nxv1f32_nxv2i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv2i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f32_nxv2i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv2i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv2i16( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv4i32(float*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv4i32(,,,,,, float*, , , i32) - -define @test_vloxseg6_nxv1f32_nxv4i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv4i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv1f32_nxv4i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f32.nxv4i32(float* %base, %index, i32 %vl) - %1 = extractvalue 
{,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv1f32.nxv4i32( %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv16i16(float*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv16i16(,,,,,,, float*, , , i32) - -define @test_vloxseg7_nxv1f32_nxv16i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv16i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f32_nxv16i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv16i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv16i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i8(float*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8(,,,,,,, float*, , , i32) @@ -67971,11 +10307,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1f32_nxv1i8(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -67983,173 +10318,15 @@ define @test_vloxseg7_mask_nxv1f32_nxv1i8(float* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv16i8(float*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv16i8(,,,,,,, float*, , , i32) - -define @test_vloxseg7_nxv1f32_nxv16i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv16i8(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1f32_nxv16i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg7_mask_nxv1f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv16i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv16i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv2i32(float*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv2i32(,,,,,,, float*, , , i32) - -define @test_vloxseg7_nxv1f32_nxv2i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv2i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f32_nxv2i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv2i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv2i32( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv4i16(float*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv4i16(,,,,,,, float*, , , i32) - -define @test_vloxseg7_nxv1f32_nxv4i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv4i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f32_nxv4i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv4i16(float* %base, %index, i32 %vl) - 
%1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv4i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv32i16(float*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv32i16(,,,,,,, float*, , , i32) - -define @test_vloxseg7_nxv1f32_nxv32i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv32i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f32_nxv32i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv32i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv32i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i32(float*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32(,,,,,,, float*, , , i32) @@ -68166,11 +10343,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1f32_nxv1i32(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -68178,212 +10354,15 @@ define @test_vloxseg7_mask_nxv1f32_nxv1i32(float* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv8i16(float*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv8i16(,,,,,,, float*, , , i32) - -define @test_vloxseg7_nxv1f32_nxv8i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv8i16(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1f32_nxv8i16(float* %base, %index, i32 %vl, 
%mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv8i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv8i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv8i8(float*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv8i8(,,,,,,, float*, , , i32) - -define @test_vloxseg7_nxv1f32_nxv8i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv8i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f32_nxv8i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv8i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv8i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv8i32(float*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv8i32(,,,,,,, float*, , , i32) - -define @test_vloxseg7_nxv1f32_nxv8i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv8i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f32_nxv8i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv8i32(float* %base, 
%index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv8i32( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv64i8(float*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv64i8(,,,,,,, float*, , , i32) - -define @test_vloxseg7_nxv1f32_nxv64i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv64i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f32_nxv64i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv64i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv64i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv4i8(float*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv4i8(,,,,,,, float*, , , i32) - -define @test_vloxseg7_nxv1f32_nxv4i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv4i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f32_nxv4i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv4i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv4i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i16(float*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16(,,,,,,, float*, , , i32) @@ -68400,11 +10379,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv1f32_nxv1i16(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i32 
%vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -68412,252 +10390,15 @@ define @test_vloxseg7_mask_nxv1f32_nxv1i16(float* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv32i8(float*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv32i8(,,,,,,, float*, , , i32) - -define @test_vloxseg7_nxv1f32_nxv32i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv32i8(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv1f32_nxv32i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv32i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv32i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv2i8(float*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv2i8(,,,,,,, float*, , , i32) - -define @test_vloxseg7_nxv1f32_nxv2i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv2i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f32_nxv2i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv2i8(float* %base, %index, 
i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv2i8( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv16i32(float*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv16i32(,,,,,,, float*, , , i32) - -define @test_vloxseg7_nxv1f32_nxv16i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv16i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f32_nxv16i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv16i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv16i32( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv2i16(float*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv2i16(,,,,,,, float*, , , i32) - -define @test_vloxseg7_nxv1f32_nxv2i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv2i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f32_nxv2i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv2i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv2i16( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv4i32(float*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv4i32(,,,,,,, float*, , , i32) - -define @test_vloxseg7_nxv1f32_nxv4i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, 
e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv4i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv1f32_nxv4i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f32.nxv4i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv1f32.nxv4i32( %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv16i16(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv16i16(,,,,,,,, float*, , , i32) - -define @test_vloxseg8_nxv1f32_nxv16i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv16i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f32_nxv16i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv16i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i8(float*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8(,,,,,,,, float*, , , i32) @@ -68674,51 +10415,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1f32_nxv1i8(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv1f32_nxv1i8( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t 
-; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv16i8(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv16i8(,,,,,,,, float*, , , i32) - -define @test_vloxseg8_nxv1f32_nxv16i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv16i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f32_nxv16i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -68727,137 +10427,15 @@ define @test_vloxseg8_mask_nxv1f32_nxv16i8(float* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv16i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv2i32(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv2i32(,,,,,,,, float*, , , i32) - -define @test_vloxseg8_nxv1f32_nxv2i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv2i32(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv1f32_nxv2i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv2i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv4i16(float*, , i32) -declare {,,,,,,,} 
@llvm.riscv.vloxseg8.mask.nxv1f32.nxv4i16(,,,,,,,, float*, , , i32) - -define @test_vloxseg8_nxv1f32_nxv4i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv4i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f32_nxv4i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv4i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv32i16(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv32i16(,,,,,,,, float*, , , i32) - -define @test_vloxseg8_nxv1f32_nxv32i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv32i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f32_nxv32i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv32i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i32(float*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32(,,,,,,,, float*, , , i32) @@ -68874,51 +10452,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1f32_nxv1i32(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv1f32_nxv1i32( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; 
CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv8i16(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv8i16(,,,,,,,, float*, , , i32) - -define @test_vloxseg8_nxv1f32_nxv8i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv8i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f32_nxv8i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -68927,177 +10464,15 @@ define @test_vloxseg8_mask_nxv1f32_nxv8i16(float* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv8i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv8i8(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv8i8(,,,,,,,, float*, , , i32) - -define @test_vloxseg8_nxv1f32_nxv8i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv8i8(float* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv1f32_nxv8i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv8i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call 
{,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv8i32(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv8i32(,,,,,,,, float*, , , i32) - -define @test_vloxseg8_nxv1f32_nxv8i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv8i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f32_nxv8i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv8i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv64i8(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv64i8(,,,,,,,, float*, , , i32) - -define @test_vloxseg8_nxv1f32_nxv64i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv64i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f32_nxv64i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv64i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv4i8(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv4i8(,,,,,,,, float*, , , i32) - -define @test_vloxseg8_nxv1f32_nxv4i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv4i8: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv4i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f32_nxv4i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv4i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i16(float*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16(,,,,,,,, float*, , , i32) @@ -69114,211 +10489,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv1f32_nxv1i16(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv1f32_nxv1i16( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv1i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv32i8(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv32i8(,,,,,,,, float*, , , i32) - -define @test_vloxseg8_nxv1f32_nxv32i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv32i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f32_nxv32i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; 
CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv32i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv2i8(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv2i8(,,,,,,,, float*, , , i32) - -define @test_vloxseg8_nxv1f32_nxv2i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv2i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f32_nxv2i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv2i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv16i32(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv16i32(,,,,,,,, float*, , , i32) - -define @test_vloxseg8_nxv1f32_nxv16i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv16i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f32_nxv16i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv16i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - 
-declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv2i16(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv2i16(,,,,,,,, float*, , , i32) - -define @test_vloxseg8_nxv1f32_nxv2i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv2i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f32_nxv2i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv2i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv4i32(float*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv4i32(,,,,,,,, float*, , , i32) - -define @test_vloxseg8_nxv1f32_nxv4i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv4i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv1f32_nxv4i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -69327,253 +10501,13 @@ define @test_vloxseg8_mask_nxv1f32_nxv4i32(float* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv1f32.nxv4i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv16i16(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv16i16(,, half*, , , i32) - -define @test_vloxseg2_nxv8f16_nxv16i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f16_nxv16i16(half* %base, %index, i32 %vl, %mask) { -; 
CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv16i16( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv1i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv1i8(,, half*, , , i32) - -define @test_vloxseg2_nxv8f16_nxv1i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f16_nxv1i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv1i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv16i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv16i8(,, half*, , , i32) - -define @test_vloxseg2_nxv8f16_nxv16i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f16_nxv16i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv16i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv2i32(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv2i32(,, half*, , , i32) - -define @test_vloxseg2_nxv8f16_nxv2i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret 
-entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f16_nxv2i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv2i32( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv4i16(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv4i16(,, half*, , , i32) - -define @test_vloxseg2_nxv8f16_nxv4i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv4i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f16_nxv4i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv4i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv4i16( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv32i16(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv32i16(,, half*, , , i32) - -define @test_vloxseg2_nxv8f16_nxv32i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f16_nxv32i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv32i16( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv1i32(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv1i32(,, half*, , , i32) - -define 
@test_vloxseg2_nxv8f16_nxv1i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f16_nxv1i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv1i32( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i16(half*, , i32) @@ -69592,22 +10526,18 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv8f16_nxv8i16(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv8f16_nxv8i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16( %val, %val, half* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i8(half*, , i32) @@ -69626,22 +10556,18 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv8f16_nxv8i8(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv8f16_nxv8i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,} 
@llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8( %val, %val, half* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i32(half*, , i32) @@ -69660,541 +10586,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv8f16_nxv8i32(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv8f16_nxv8i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv64i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv64i8(,, half*, , , i32) - -define @test_vloxseg2_nxv8f16_nxv64i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv64i8(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32( %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv8f16_nxv64i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv64i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv4i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv4i8(,, half*, , , i32) - -define @test_vloxseg2_nxv8f16_nxv4i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f16_nxv4i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv8f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv4i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv1i16(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv1i16(,, half*, , , i32) - -define @test_vloxseg2_nxv8f16_nxv1i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv1i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f16_nxv1i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv1i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv1i16( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv32i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv32i8(,, half*, , , i32) - -define @test_vloxseg2_nxv8f16_nxv32i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f16_nxv32i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv32i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv2i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv2i8(,, half*, , , i32) - -define @test_vloxseg2_nxv8f16_nxv2i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f16_nxv2i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: 
vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv2i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv16i32(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv16i32(,, half*, , , i32) - -define @test_vloxseg2_nxv8f16_nxv16i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f16_nxv16i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv16i32( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv2i16(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv2i16(,, half*, , , i32) - -define @test_vloxseg2_nxv8f16_nxv2i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f16_nxv2i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv2i16( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f16.nxv4i32(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv4i32(,, half*, , , i32) - -define @test_vloxseg2_nxv8f16_nxv4i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv4i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} 
%0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f16_nxv4i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv4i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv4i32( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv16i16(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv16i16(,,, half*, , , i32) - -define @test_vloxseg3_nxv8f16_nxv16i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8f16_nxv16i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv16i16( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv1i8(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv1i8(,,, half*, , , i32) - -define @test_vloxseg3_nxv8f16_nxv1i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8f16_nxv1i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv1i8( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv16i8(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv16i8(,,, half*, , , i32) - -define @test_vloxseg3_nxv8f16_nxv16i8(half* %base, %index, i32 %vl) { -; 
CHECK-LABEL: test_vloxseg3_nxv8f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8f16_nxv16i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv16i8( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv2i32(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv2i32(,,, half*, , , i32) - -define @test_vloxseg3_nxv8f16_nxv2i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8f16_nxv2i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv2i32( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv4i16(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv4i16(,,, half*, , , i32) - -define @test_vloxseg3_nxv8f16_nxv4i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv4i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8f16_nxv4i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv4i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail 
call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv4i16( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv32i16(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv32i16(,,, half*, , , i32) - -define @test_vloxseg3_nxv8f16_nxv32i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8f16_nxv32i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv32i16( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv1i32(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv1i32(,,, half*, , , i32) - -define @test_vloxseg3_nxv8f16_nxv1i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8f16_nxv1i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv1i32( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i16(half*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16(,,, half*, , , i32) @@ -70211,23 +10616,20 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv8f16_nxv8i16(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv8f16_nxv8i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 +; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; 
CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i8(half*, , i32) @@ -70246,23 +10648,20 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv8f16_nxv8i8(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv8f16_nxv8i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 +; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,} %0, 1 + ret %1 } declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i32(half*, , i32) @@ -70281,557 +10680,21 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv8f16_nxv8i32(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv8f16_nxv8i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v6, v8 +; CHECK-NEXT: vmv2r.v v10, v6 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv64i8(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv64i8(,,, half*, , , i32) - -define @test_vloxseg3_nxv8f16_nxv64i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv64i8(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv8f16_nxv64i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv64i8: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv64i8( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv4i8(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv4i8(,,, half*, , , i32) - -define @test_vloxseg3_nxv8f16_nxv4i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8f16_nxv4i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv4i8( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv1i16(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv1i16(,,, half*, , , i32) - -define @test_vloxseg3_nxv8f16_nxv1i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv1i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8f16_nxv1i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv1i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv1i16( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv32i8(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv32i8(,,, half*, , , i32) - -define @test_vloxseg3_nxv8f16_nxv32i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v 
v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8f16_nxv32i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv32i8( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv2i8(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv2i8(,,, half*, , , i32) - -define @test_vloxseg3_nxv8f16_nxv2i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8f16_nxv2i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv2i8( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv16i32(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv16i32(,,, half*, , , i32) - -define @test_vloxseg3_nxv8f16_nxv16i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8f16_nxv16i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv16i32( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - 
-declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv2i16(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv2i16(,,, half*, , , i32) - -define @test_vloxseg3_nxv8f16_nxv2i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8f16_nxv2i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv2i16( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv4i32(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv4i32(,,, half*, , , i32) - -define @test_vloxseg3_nxv8f16_nxv4i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv4i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv8f16_nxv4i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv4i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv4i32( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv16i16(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv16i16(,,,, half*, , , i32) - -define @test_vloxseg4_nxv8f16_nxv16i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8f16_nxv16i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v14, v12 -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: 
vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv16i16( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv1i8(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv1i8(,,,, half*, , , i32) - -define @test_vloxseg4_nxv8f16_nxv1i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8f16_nxv1i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv1i8( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv16i8(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv16i8(,,,, half*, , , i32) - -define @test_vloxseg4_nxv8f16_nxv16i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8f16_nxv16i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv16i8( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv2i32(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv2i32(,,,, half*, , , i32) - -define @test_vloxseg4_nxv8f16_nxv2i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret 
-entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8f16_nxv2i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv2i32( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv4i16(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv4i16(,,,, half*, , , i32) - -define @test_vloxseg4_nxv8f16_nxv4i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv4i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8f16_nxv4i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv4i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv4i16( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv32i16(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv32i16(,,,, half*, , , i32) - -define @test_vloxseg4_nxv8f16_nxv32i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8f16_nxv32i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} 
@llvm.riscv.vloxseg4.mask.nxv8f16.nxv32i16( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv1i32(half*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv1i32(,,,, half*, , , i32)
-
-define @test_vloxseg4_nxv8f16_nxv1i32(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv1i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv8f16_nxv1i32(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv1i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv1i32( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
 declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i16(half*, , i32)
 declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16(,,,, half*, , , i32)

@@ -70848,24 +10711,21 @@ entry:
 ret %1
 }

-define @test_vloxseg4_mask_nxv8f16_nxv8i16(half* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg4_mask_nxv8f16_nxv8i16( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
+ %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
+ %1 = extractvalue {,,,} %0, 1
+ ret %1
 }

 declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i8(half*, , i32)
@@ -70884,24 +10744,21 @@ entry:
 ret %1
 }

-define @test_vloxseg4_mask_nxv8f16_nxv8i8(half* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg4_mask_nxv8f16_nxv8i8( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
+ %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
+ %1 = extractvalue {,,,} %0, 1
+ ret %1
 }

 declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i32(half*, , i32)
@@ -70920,552 +10777,23 @@ entry:
 ret %1
 }

-define @test_vloxseg4_mask_nxv8f16_nxv8i32(half* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg4_mask_nxv8f16_nxv8i32( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv64i8(half*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv64i8(,,,, half*, , , i32)
-
-define @test_vloxseg4_nxv8f16_nxv64i8(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv64i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv8f16_nxv64i8(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v16, (a0), v8
+; CHECK-NEXT: vmv2r.v v16, v8
 ; CHECK-NEXT: vmv2r.v v18, v16
 ; CHECK-NEXT: vmv2r.v v20, v16
 ; CHECK-NEXT: vmv2r.v v22, v16
 ; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v16, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT: vmv2r.v v8, v18
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv64i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv64i8( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv4i8(half*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv4i8(,,,, half*, , , i32)
-
-define @test_vloxseg4_nxv8f16_nxv4i8(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv4i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv8f16_nxv4i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv4i8( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv1i16(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv1i16(,,,, half*, , , i32) - -define @test_vloxseg4_nxv8f16_nxv1i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv1i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8f16_nxv1i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv1i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv1i16( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv32i8(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv32i8(,,,, half*, , , i32) - -define @test_vloxseg4_nxv8f16_nxv32i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8f16_nxv32i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v14, v12 -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv32i8(half* %base, %index, i32 
%vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv32i8( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv2i8(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv2i8(,,,, half*, , , i32) - -define @test_vloxseg4_nxv8f16_nxv2i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8f16_nxv2i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv2i8( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv16i32(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv16i32(,,,, half*, , , i32) - -define @test_vloxseg4_nxv8f16_nxv16i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8f16_nxv16i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv16i32( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv2i16(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv2i16(,,,, half*, , , i32) - -define @test_vloxseg4_nxv8f16_nxv2i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8f16_nxv2i16(half* %base, %index, 
i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv2i16( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv4i32(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv4i32(,,,, half*, , , i32) - -define @test_vloxseg4_nxv8f16_nxv4i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv4i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv8f16_nxv4i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv4i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv4i32( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv16i16(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv16i16(,, float*, , , i32) - -define @test_vloxseg2_nxv8f32_nxv16i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv16i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f32_nxv16i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv16i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv16i16( %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv1i8(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv1i8(,, float*, , , i32) - -define @test_vloxseg2_nxv8f32_nxv1i8(float* %base, 
%index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv1i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f32_nxv1i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv1i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv1i8( %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv16i8(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv16i8(,, float*, , , i32) - -define @test_vloxseg2_nxv8f32_nxv16i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv16i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f32_nxv16i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv16i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv16i8( %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv2i32(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv2i32(,, float*, , , i32) - -define @test_vloxseg2_nxv8f32_nxv2i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv2i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f32_nxv2i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv2i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv2i32( %1, %1, float* %base, 
%index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv4i16(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv4i16(,, float*, , , i32) - -define @test_vloxseg2_nxv8f32_nxv4i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv4i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f32_nxv4i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv4i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv4i16( %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv32i16(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv32i16(,, float*, , , i32) - -define @test_vloxseg2_nxv8f32_nxv32i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv32i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f32_nxv32i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv32i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv32i16( %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv1i32(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv1i32(,, float*, , , i32) - -define @test_vloxseg2_nxv8f32_nxv1i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv1i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f32_nxv1i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v 
v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv1i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv1i32( %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i16(float*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16(,, float*, , , i32) @@ -71482,22 +10810,18 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv8f32_nxv8i16(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv8f32_nxv8i16( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16( %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16( %val, %val, float* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i8(float*, , i32) @@ -71516,22 +10840,18 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv8f32_nxv8i8(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv8f32_nxv8i8( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8( %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8( %val, %val, float* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i32(float*, , i32) @@ -71550,398 +10870,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv8f32_nxv8i32(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv8f32_nxv8i32( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v4, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t +; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; 
CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv64i8(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv64i8(,, float*, , , i32)
-
-define @test_vloxseg2_nxv8f32_nxv64i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv64i8(float* %base, %index, i32 %vl)
+ %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32( %val, %val, float* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,} %0, 1
 ret %1
 }

-define @test_vloxseg2_mask_nxv8f32_nxv64i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v16, (a0), v8
-; CHECK-NEXT: vmv4r.v v20, v16
-; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT: vmv4r.v v8, v20
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv64i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv64i8( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv4i8(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv4i8(,, float*, , , i32)
-
-define @test_vloxseg2_nxv8f32_nxv4i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv4i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv8f32_nxv4i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8
-; CHECK-NEXT: vmv4r.v v16, v12
-; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t
-; CHECK-NEXT: vmv4r.v v8, v16
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv4i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv4i8( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv1i16(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv1i16(,, float*, , , i32)
-
-define @test_vloxseg2_nxv8f32_nxv1i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv1i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv8f32_nxv1i16(float* 
%base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv1i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv1i16( %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv32i8(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv32i8(,, float*, , , i32) - -define @test_vloxseg2_nxv8f32_nxv32i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv32i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f32_nxv32i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv32i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv32i8( %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv2i8(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv2i8(,, float*, , , i32) - -define @test_vloxseg2_nxv8f32_nxv2i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv2i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f32_nxv2i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv2i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv2i8( %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv16i32(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv16i32(,, float*, , , i32) - -define @test_vloxseg2_nxv8f32_nxv16i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: 
vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv16i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f32_nxv16i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v20, v16 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v20 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv16i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv16i32( %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv2i16(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv2i16(,, float*, , , i32) - -define @test_vloxseg2_nxv8f32_nxv2i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv2i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f32_nxv2i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv2i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv2i16( %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv8f32.nxv4i32(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv4i32(,, float*, , , i32) - -define @test_vloxseg2_nxv8f32_nxv4i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv4i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv8f32_nxv4i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv8f32_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v16, v12 -; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv4r.v v8, v16 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv4i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv4i32( %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv16i16(double*, , i32) 
-declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv16i16(,, double*, , , i32) - -define @test_vloxseg2_nxv2f64_nxv16i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv16i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f64_nxv16i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv16i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv16i16( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv1i8(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv1i8(,, double*, , , i32) - -define @test_vloxseg2_nxv2f64_nxv1i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv1i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f64_nxv1i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv1i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv1i8( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv16i8(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv16i8(,, double*, , , i32) - -define @test_vloxseg2_nxv2f64_nxv16i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv16i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f64_nxv16i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv16i8(double* 
%base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv16i8( %1, %1, double* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
 declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i32(double*, , i32)
 declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32(,, double*, , , i32)

@@ -71958,364 +10900,20 @@ entry:
 ret %1
 }

-define @test_vloxseg2_mask_nxv2f64_nxv2i32(double* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg2_mask_nxv2f64_nxv2i32( %val, double* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v8
 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i32(double* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32( %1, %1, double* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv4i16(double*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv4i16(,, double*, , , i32)
-
-define @test_vloxseg2_nxv2f64_nxv4i16(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv4i16(double* %base, %index, i32 %vl)
+ %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32( %val, %val, double* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,} %0, 1
 ret %1
 }

-define @test_vloxseg2_mask_nxv2f64_nxv4i16(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv4i16(double* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv4i16( %1, %1, double* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv32i16(double*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv32i16(,, double*, , , i32)
-
-define @test_vloxseg2_nxv2f64_nxv32i16(double* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv32i16(double* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv2f64_nxv32i16(double* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8
-; 
CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv32i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv32i16( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv1i32(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv1i32(,, double*, , , i32) - -define @test_vloxseg2_nxv2f64_nxv1i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv1i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f64_nxv1i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv1i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv1i32( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv8i16(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv8i16(,, double*, , , i32) - -define @test_vloxseg2_nxv2f64_nxv8i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv8i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f64_nxv8i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv8i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv8i16( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv8i8(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv8i8(,, double*, , , i32) - -define @test_vloxseg2_nxv2f64_nxv8i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv8i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} 
%0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f64_nxv8i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv8i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv8i8( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv8i32(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv8i32(,, double*, , , i32) - -define @test_vloxseg2_nxv2f64_nxv8i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv8i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f64_nxv8i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv8i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv8i32( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv64i8(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv64i8(,, double*, , , i32) - -define @test_vloxseg2_nxv2f64_nxv64i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv64i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f64_nxv64i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv64i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv64i8( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv4i8(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv4i8(,, double*, , , i32) - -define @test_vloxseg2_nxv2f64_nxv4i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv4i8: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv4i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f64_nxv4i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv4i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv4i8( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv1i16(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv1i16(,, double*, , , i32) - -define @test_vloxseg2_nxv2f64_nxv1i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv1i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f64_nxv1i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv1i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv1i16( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv32i8(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv32i8(,, double*, , , i32) - -define @test_vloxseg2_nxv2f64_nxv32i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv32i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f64_nxv32i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv32i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv32i8( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} 
@llvm.riscv.vloxseg2.nxv2f64.nxv2i8(double*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8(,, double*, , , i32) @@ -72332,58 +10930,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2f64_nxv2i8(double* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv2f64_nxv2i8( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv16i32(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv16i32(,, double*, , , i32) - -define @test_vloxseg2_nxv2f64_nxv16i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv16i32(double* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8( %val, %val, double* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv2f64_nxv16i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv16i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv16i32( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i16(double*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16(,, double*, , , i32) @@ -72400,163 +10960,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2f64_nxv2i16(double* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv2f64_nxv2i16( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16( %1, %1, double* %base, %index, 
%mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f64.nxv4i32(double*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv4i32(,, double*, , , i32) - -define @test_vloxseg2_nxv2f64_nxv4i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv4i32(double* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16( %val, %val, double* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv2f64_nxv4i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv4i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv4i32( %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv16i16(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv16i16(,,, double*, , , i32) - -define @test_vloxseg3_nxv2f64_nxv16i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv16i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f64_nxv16i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv16i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv16i16( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv1i8(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv1i8(,,, double*, , , i32) - -define @test_vloxseg3_nxv2f64_nxv1i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv1i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f64_nxv1i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; 
CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv1i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv1i8( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv16i8(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv16i8(,,, double*, , , i32) - -define @test_vloxseg3_nxv2f64_nxv16i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv16i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f64_nxv16i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv16i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv16i8( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i32(double*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32(,,, double*, , , i32) @@ -72573,375 +10990,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2f64_nxv2i32(double* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv2f64_nxv2i32( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 +; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv4i16(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv4i16(,,, double*, , , i32) - -define @test_vloxseg3_nxv2f64_nxv4i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv4i16(double* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, 
double* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2f64_nxv4i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv4i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv4i16( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv32i16(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv32i16(,,, double*, , , i32) - -define @test_vloxseg3_nxv2f64_nxv32i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv32i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f64_nxv32i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv32i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv32i16( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv1i32(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv1i32(,,, double*, , , i32) - -define @test_vloxseg3_nxv2f64_nxv1i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv1i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f64_nxv1i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv1i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv1i32( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv8i16(double*, , i32) -declare {,,} 
@llvm.riscv.vloxseg3.mask.nxv2f64.nxv8i16(,,, double*, , , i32) - -define @test_vloxseg3_nxv2f64_nxv8i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv8i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f64_nxv8i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv8i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv8i16( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv8i8(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv8i8(,,, double*, , , i32) - -define @test_vloxseg3_nxv2f64_nxv8i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv8i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f64_nxv8i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv8i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv8i8( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv8i32(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv8i32(,,, double*, , , i32) - -define @test_vloxseg3_nxv2f64_nxv8i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv8i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f64_nxv8i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, 
v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv8i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv8i32( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv64i8(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv64i8(,,, double*, , , i32) - -define @test_vloxseg3_nxv2f64_nxv64i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv64i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f64_nxv64i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv64i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv64i8( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv4i8(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv4i8(,,, double*, , , i32) - -define @test_vloxseg3_nxv2f64_nxv4i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv4i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f64_nxv4i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv4i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv4i8( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv1i16(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv1i16(,,, double*, , , i32) - -define @test_vloxseg3_nxv2f64_nxv1i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv1i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define 
@test_vloxseg3_mask_nxv2f64_nxv1i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv1i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv1i16( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv32i8(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv32i8(,,, double*, , , i32) - -define @test_vloxseg3_nxv2f64_nxv32i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv32i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f64_nxv32i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv32i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv32i8( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i8(double*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8(,,, double*, , , i32) @@ -72958,60 +11022,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2f64_nxv2i8(double* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv2f64_nxv2i8( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8 +; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv16i32(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv16i32(,,, double*, , , i32) - -define @test_vloxseg3_nxv2f64_nxv16i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 
-; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv16i32(double* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2f64_nxv16i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv16i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv16i32( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i16(double*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16(,,, double*, , , i32) @@ -73028,168 +11054,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2f64_nxv2i16(double* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv2f64_nxv2i16( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8 +; CHECK-NEXT: vmv2r.v v2, v8 ; CHECK-NEXT: vmv2r.v v4, v2 ; CHECK-NEXT: vmv2r.v v6, v2 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v4 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv4i32(double*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv4i32(,,, double*, , , i32) - -define @test_vloxseg3_nxv2f64_nxv4i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv4i32(double* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2f64_nxv4i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vmv2r.v v6, v2 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv4i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv4i32( %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} 
- -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv16i16(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv16i16(,,,, double*, , , i32) - -define @test_vloxseg4_nxv2f64_nxv16i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv16i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f64_nxv16i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v14, v12 -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv16i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv16i16( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv1i8(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv1i8(,,,, double*, , , i32) - -define @test_vloxseg4_nxv2f64_nxv1i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv1i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f64_nxv1i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv1i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv1i8( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv16i8(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv16i8(,,,, double*, , , i32) - -define @test_vloxseg4_nxv2f64_nxv16i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv16i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f64_nxv16i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: 
vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv16i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv16i8( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i32(double*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32(,,,, double*, , , i32) @@ -73206,386 +11086,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2f64_nxv2i32(double* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv2f64_nxv2i32( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv4i16(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv4i16(,,,, double*, , , i32) - -define @test_vloxseg4_nxv2f64_nxv4i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv4i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f64_nxv4i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv4i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv4i16( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv32i16(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv32i16(,,,, double*, , , i32) - -define @test_vloxseg4_nxv2f64_nxv32i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv32i16(double* %base, %index, i32 %vl) - %1 = 
extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f64_nxv32i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv32i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv32i16( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv1i32(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv1i32(,,,, double*, , , i32) - -define @test_vloxseg4_nxv2f64_nxv1i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv1i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f64_nxv1i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv1i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv1i32( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv8i16(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv8i16(,,,, double*, , , i32) - -define @test_vloxseg4_nxv2f64_nxv8i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv8i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f64_nxv8i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv8i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv8i16( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue 
{,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv8i8(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv8i8(,,,, double*, , , i32) - -define @test_vloxseg4_nxv2f64_nxv8i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv8i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f64_nxv8i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv8i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv8i8( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv8i32(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv8i32(,,,, double*, , , i32) - -define @test_vloxseg4_nxv2f64_nxv8i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv8i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f64_nxv8i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 +; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv8i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv8i32( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv64i8(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv64i8(,,,, double*, , , i32) - -define @test_vloxseg4_nxv2f64_nxv64i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv64i8(double* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define 
@test_vloxseg4_mask_nxv2f64_nxv64i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv64i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv64i8( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv4i8(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv4i8(,,,, double*, , , i32) - -define @test_vloxseg4_nxv2f64_nxv4i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv4i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f64_nxv4i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv4i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv4i8( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv1i16(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv1i16(,,,, double*, , , i32) - -define @test_vloxseg4_nxv2f64_nxv1i16(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv1i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f64_nxv1i16(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv1i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv1i16( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} 
@llvm.riscv.vloxseg4.nxv2f64.nxv32i8(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv32i8(,,,, double*, , , i32) - -define @test_vloxseg4_nxv2f64_nxv32i8(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv32i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f64_nxv32i8(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v14, v12 -; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v14 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv32i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv32i8( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i8(double*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8(,,,, double*, , , i32) @@ -73602,62 +11119,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2f64_nxv2i8(double* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv2f64_nxv2i8( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i8(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv16i32(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv16i32(,,,, double*, , , i32) - -define @test_vloxseg4_nxv2f64_nxv16i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv16i32(double* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2f64_nxv16i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: 
vloxseg4ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v18, v16 -; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v18 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv16i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv16i32( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i16(double*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16(,,,, double*, , , i32) @@ -73674,198 +11152,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2f64_nxv2i16(double* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv2f64_nxv2i16( %val, double* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vmv2r.v v14, v12 +; CHECK-NEXT: vmv2r.v v16, v12 +; CHECK-NEXT: vmv2r.v v18, v12 ; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i16(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv4i32(double*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv4i32(,,,, double*, , , i32) - -define @test_vloxseg4_nxv2f64_nxv4i32(double* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv4i32(double* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2f64_nxv4i32(double* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v12, v10 -; CHECK-NEXT: vmv2r.v v14, v10 -; CHECK-NEXT: vmv2r.v v16, v10 -; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v12 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv4i32(double* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f64.nxv4i32( %1, %1, %1, %1, double* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv16i16(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv16i16(,, half*, , , i32) - -define @test_vloxseg2_nxv4f16_nxv16i16(half* %base, %index, i32 %vl) { 
-; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f16_nxv16i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv16i16( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv1i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv1i8(,, half*, , , i32) - -define @test_vloxseg2_nxv4f16_nxv1i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f16_nxv1i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv1i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv16i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv16i8(,, half*, , , i32) - -define @test_vloxseg2_nxv4f16_nxv16i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f16_nxv16i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv16i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 
- ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv2i32(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv2i32(,, half*, , , i32) - -define @test_vloxseg2_nxv4f16_nxv2i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f16_nxv2i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv2i32( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i16(half*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16(,, half*, , , i32) @@ -73882,228 +11185,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4f16_nxv4i16(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv32i16(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv32i16(,, half*, , , i32) - -define @test_vloxseg2_nxv4f16_nxv32i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv32i16(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16( %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4f16_nxv32i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv32i16(half* %base, 
%index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv32i16( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv1i32(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv1i32(,, half*, , , i32) - -define @test_vloxseg2_nxv4f16_nxv1i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f16_nxv1i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv1i32( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv8i16(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv8i16(,, half*, , , i32) - -define @test_vloxseg2_nxv4f16_nxv8i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f16_nxv8i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv8i16( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv8i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv8i8(,, half*, , , i32) - -define @test_vloxseg2_nxv4f16_nxv8i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f16_nxv8i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v 
v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv8i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv8i32(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv8i32(,, half*, , , i32) - -define @test_vloxseg2_nxv4f16_nxv8i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f16_nxv8i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv8i32( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv64i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv64i8(,, half*, , , i32) - -define @test_vloxseg2_nxv4f16_nxv64i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f16_nxv64i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv64i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i8(half*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8(,, half*, , , i32) @@ -74120,194 +11215,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv4f16_nxv4i8(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, 
e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv1i16(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv1i16(,, half*, , , i32) - -define @test_vloxseg2_nxv4f16_nxv1i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv1i16(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8( %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4f16_nxv1i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv1i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv1i16( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv32i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv32i8(,, half*, , , i32) - -define @test_vloxseg2_nxv4f16_nxv32i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f16_nxv32i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv32i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv2i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv2i8(,, half*, , , i32) - -define @test_vloxseg2_nxv4f16_nxv2i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail 
call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f16_nxv2i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv2i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv16i32(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv16i32(,, half*, , , i32) - -define @test_vloxseg2_nxv4f16_nxv16i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f16_nxv16i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv16i32( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv2i16(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv2i16(,, half*, , , i32) - -define @test_vloxseg2_nxv4f16_nxv2i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f16_nxv2i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv2i16( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i32(half*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32(,, half*, , , i32) @@ -74324,164 +11245,20 @@ entry: ret %1 } -define 
@test_vloxseg2_mask_nxv4f16_nxv4i32(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv16i16(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv16i16(,,, half*, , , i32) - -define @test_vloxseg3_nxv4f16_nxv16i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32( %val, %val, half* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv4f16_nxv16i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv16i16( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv1i8(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv1i8(,,, half*, , , i32) - -define @test_vloxseg3_nxv4f16_nxv1i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f16_nxv1i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} 
@llvm.riscv.vloxseg3.mask.nxv4f16.nxv1i8( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv16i8(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv16i8(,,, half*, , , i32) - -define @test_vloxseg3_nxv4f16_nxv16i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f16_nxv16i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv16i8( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv2i32(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv2i32(,,, half*, , , i32) - -define @test_vloxseg3_nxv4f16_nxv2i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f16_nxv2i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv2i32( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i16(half*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16(,,, half*, , , i32) @@ -74498,235 +11275,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv4f16_nxv4i16(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = 
tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv32i16(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv32i16(,,, half*, , , i32) - -define @test_vloxseg3_nxv4f16_nxv32i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv32i16(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv4f16_nxv32i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv32i16( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv1i32(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv1i32(,,, half*, , , i32) - -define @test_vloxseg3_nxv4f16_nxv1i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f16_nxv1i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv1i32( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv8i16(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv8i16(,,, half*, , , i32) - -define @test_vloxseg3_nxv4f16_nxv8i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} 
%0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f16_nxv8i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv8i16( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv8i8(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv8i8(,,, half*, , , i32) - -define @test_vloxseg3_nxv4f16_nxv8i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f16_nxv8i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv8i8( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv8i32(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv8i32(,,, half*, , , i32) - -define @test_vloxseg3_nxv4f16_nxv8i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f16_nxv8i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv8i32( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv64i8(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv64i8(,,, half*, , , i32) - -define @test_vloxseg3_nxv4f16_nxv64i8(half* %base, 
%index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f16_nxv64i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv64i8( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i8(half*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8(,,, half*, , , i32) @@ -74743,200 +11307,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv4f16_nxv4i8(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv1i16(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv1i16(,,, half*, , , i32) - -define @test_vloxseg3_nxv4f16_nxv1i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv1i16(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv4f16_nxv1i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv1i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv1i16( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - 
%3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv32i8(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv32i8(,,, half*, , , i32) - -define @test_vloxseg3_nxv4f16_nxv32i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f16_nxv32i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv32i8( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv2i8(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv2i8(,,, half*, , , i32) - -define @test_vloxseg3_nxv4f16_nxv2i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f16_nxv2i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv2i8( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv16i32(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv16i32(,,, half*, , , i32) - -define @test_vloxseg3_nxv4f16_nxv16i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f16_nxv16i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, 
e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv16i32( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv2i16(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv2i16(,,, half*, , , i32) - -define @test_vloxseg3_nxv4f16_nxv2i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv4f16_nxv2i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv2i16( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i32(half*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32(,,, half*, , , i32) @@ -74953,169 +11339,21 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv4f16_nxv4i32(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v7, v8 +; CHECK-NEXT: vmv1r.v v9, v7 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv16i16(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv16i16(,,,, half*, , , i32) - -define @test_vloxseg4_nxv4f16_nxv16i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, 
i32 %vl) + %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv4f16_nxv16i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv16i16( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv1i8(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv1i8(,,,, half*, , , i32) - -define @test_vloxseg4_nxv4f16_nxv1i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f16_nxv1i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv1i8( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv16i8(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv16i8(,,,, half*, , , i32) - -define @test_vloxseg4_nxv4f16_nxv16i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f16_nxv16i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv16i8( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} 
@llvm.riscv.vloxseg4.nxv4f16.nxv2i32(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv2i32(,,,, half*, , , i32) - -define @test_vloxseg4_nxv4f16_nxv2i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f16_nxv2i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv2i32( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i16(half*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16(,,,, half*, , , i32) @@ -75132,242 +11370,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv4f16_nxv4i16(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv32i16(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv32i16(,,,, half*, , , i32) - -define @test_vloxseg4_nxv4f16_nxv32i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv32i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv4f16_nxv32i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: 
vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv32i16( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv1i32(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv1i32(,,,, half*, , , i32) - -define @test_vloxseg4_nxv4f16_nxv1i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f16_nxv1i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv1i32( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv8i16(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv8i16(,,,, half*, , , i32) - -define @test_vloxseg4_nxv4f16_nxv8i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f16_nxv8i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv8i16( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv8i8(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv8i8(,,,, half*, , , i32) - -define @test_vloxseg4_nxv4f16_nxv8i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.nxv4f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f16_nxv8i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv8i8( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv8i32(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv8i32(,,,, half*, , , i32) - -define @test_vloxseg4_nxv4f16_nxv8i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f16_nxv8i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv8i32( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv64i8(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv64i8(,,,, half*, , , i32) - -define @test_vloxseg4_nxv4f16_nxv64i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f16_nxv64i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv64i8( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue 
{,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i8(half*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8(,,,, half*, , , i32) @@ -75384,206 +11403,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv4f16_nxv4i8(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv1i16(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv1i16(,,,, half*, , , i32) - -define @test_vloxseg4_nxv4f16_nxv1i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv1i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv4f16_nxv1i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv1i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv1i16( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv32i8(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv32i8(,,,, half*, , , i32) - -define @test_vloxseg4_nxv4f16_nxv32i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f16_nxv32i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, 
e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv32i8( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv2i8(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv2i8(,,,, half*, , , i32) - -define @test_vloxseg4_nxv4f16_nxv2i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f16_nxv2i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv2i8( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv16i32(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv16i32(,,,, half*, , , i32) - -define @test_vloxseg4_nxv4f16_nxv16i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f16_nxv16i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv16i32( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv2i16(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv2i16(,,,, half*, , , i32) - -define @test_vloxseg4_nxv4f16_nxv2i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call 
{,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv4f16_nxv2i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv2i16( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i32(half*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32(,,,, half*, , , i32) @@ -75600,174 +11436,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv4f16_nxv4i32(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv16i16(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv16i16(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv4f16_nxv16i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv4f16_nxv16i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv16i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} 
%2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv1i8(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv1i8(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv4f16_nxv1i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4f16_nxv1i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv1i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv16i8(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv16i8(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv4f16_nxv16i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4f16_nxv16i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv16i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv2i32(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv2i32(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv4f16_nxv2i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4f16_nxv2i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv2i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i16(half*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16(,,,,, half*, , , i32) @@ -75784,249 +11469,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv4f16_nxv4i16(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg5_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv32i16(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv32i16(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv4f16_nxv32i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv32i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv4f16_nxv32i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv32i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv1i32(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv1i32(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv4f16_nxv1i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: 
test_vloxseg5_nxv4f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4f16_nxv1i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv1i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv8i16(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv8i16(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv4f16_nxv8i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4f16_nxv8i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv8i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv8i8(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv8i8(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv4f16_nxv8i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4f16_nxv8i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: 
vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv8i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv8i32(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv8i32(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv4f16_nxv8i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4f16_nxv8i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv8i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv64i8(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv64i8(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv4f16_nxv64i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4f16_nxv64i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv64i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i8(half*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8(,,,,, half*, , , i32) @@ -76043,212 +11503,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv4f16_nxv4i8(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg5_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: 
test_vloxseg5_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv1i16(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv1i16(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv4f16_nxv1i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv1i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg5_mask_nxv4f16_nxv1i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv1i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv1i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv32i8(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv32i8(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv4f16_nxv32i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4f16_nxv32i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} 
@llvm.riscv.vloxseg5.mask.nxv4f16.nxv32i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv2i8(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv2i8(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv4f16_nxv2i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4f16_nxv2i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv2i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv16i32(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv16i32(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv4f16_nxv16i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4f16_nxv16i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv16i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv2i16(half*, , i32) -declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv2i16(,,,,, half*, , , i32) - -define @test_vloxseg5_nxv4f16_nxv2i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg5_mask_nxv4f16_nxv2i16(half* 
%base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv2i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - declare {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i32(half*, , i32) declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32(,,,,, half*, , , i32) @@ -76265,179 +11537,24 @@ entry: ret %1 } -define @test_vloxseg5_mask_nxv4f16_nxv4i32(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg5_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,} %0, 0 - %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv16i16(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv16i16(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv4f16_nxv16i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 + %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv4f16_nxv16i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv16i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} 
@llvm.riscv.vloxseg6.nxv4f16.nxv1i8(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv1i8(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv4f16_nxv1i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4f16_nxv1i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv1i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv16i8(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv16i8(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv4f16_nxv16i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4f16_nxv16i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv16i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv2i32(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv2i32(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv4f16_nxv2i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4f16_nxv2i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv2i32: 
-; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv2i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i16(half*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16(,,,,,, half*, , , i32) @@ -76454,256 +11571,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv4f16_nxv4i16(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv32i16(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv32i16(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv4f16_nxv32i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv32i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv4f16_nxv32i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv32i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} 
@llvm.riscv.vloxseg6.nxv4f16.nxv1i32(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv1i32(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv4f16_nxv1i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4f16_nxv1i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv1i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv8i16(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv8i16(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv4f16_nxv8i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4f16_nxv8i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv8i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv8i8(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv8i8(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv4f16_nxv8i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4f16_nxv8i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: 
test_vloxseg6_mask_nxv4f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv8i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv8i32(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv8i32(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv4f16_nxv8i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4f16_nxv8i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv8i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv64i8(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv64i8(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv4f16_nxv64i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4f16_nxv64i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv64i8( %1, %1, %1, %1, %1, %1, half* %base, %index, 
%mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i8(half*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8(,,,,,, half*, , , i32) @@ -76720,218 +11606,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv4f16_nxv4i8(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv1i16(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv1i16(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv4f16_nxv1i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv1i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg6_mask_nxv4f16_nxv1i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv1i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv1i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv32i8(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv32i8(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv4f16_nxv32i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4f16_nxv32i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv32i8: -; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv32i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv2i8(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv2i8(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv4f16_nxv2i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4f16_nxv2i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv2i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv16i32(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv16i32(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv4f16_nxv16i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4f16_nxv16i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv16i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 
1 - ret %3 -} - -declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv2i16(half*, , i32) -declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv2i16(,,,,,, half*, , , i32) - -define @test_vloxseg6_nxv4f16_nxv2i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg6_mask_nxv4f16_nxv2i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv2i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - declare {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i32(half*, , i32) declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32(,,,,,, half*, , , i32) @@ -76948,184 +11641,25 @@ entry: ret %1 } -define @test_vloxseg6_mask_nxv4f16_nxv4i32(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg6_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vmv1r.v v5, v1 ; CHECK-NEXT: vmv1r.v v6, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v10, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,} %0, 0 - %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv16i16(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv16i16(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv4f16_nxv16i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 + %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv4f16_nxv16i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv16i16: 
-; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv1i8(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv1i8(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv4f16_nxv1i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4f16_nxv1i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv16i8(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv16i8(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv4f16_nxv16i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4f16_nxv16i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv4f16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv2i32(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv2i32(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv4f16_nxv2i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4f16_nxv2i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i16(half*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16(,,,,,,, half*, , , i32) @@ -77142,11 +11676,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv4f16_nxv4i16(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -77154,251 +11687,15 @@ define @test_vloxseg7_mask_nxv4f16_nxv4i16(half* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv32i16(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv32i16(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv4f16_nxv32i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv32i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv4f16_nxv32i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv32i16: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv1i32(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv1i32(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv4f16_nxv1i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4f16_nxv1i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv8i16(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv8i16(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv4f16_nxv8i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4f16_nxv8i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv4f16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv8i8(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv8i8(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv4f16_nxv8i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4f16_nxv8i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv8i32(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv8i32(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv4f16_nxv8i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4f16_nxv8i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv64i8(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv64i8(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv4f16_nxv64i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = 
tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4f16_nxv64i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i8(half*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8(,,,,,,, half*, , , i32) @@ -77415,11 +11712,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv4f16_nxv4i8(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -77427,212 +11723,15 @@ define @test_vloxseg7_mask_nxv4f16_nxv4i8(half* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv1i16(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv1i16(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv4f16_nxv1i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv1i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv4f16_nxv1i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv1i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv1i16( %1, %1, %1, 
%1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv32i8(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv32i8(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv4f16_nxv32i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4f16_nxv32i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv2i8(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv2i8(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv4f16_nxv2i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4f16_nxv2i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv16i32(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv16i32(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv4f16_nxv16i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.nxv4f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4f16_nxv16i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv2i16(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv2i16(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv4f16_nxv2i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv4f16_nxv2i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i32(half*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32(,,,,,,, half*, , , i32) @@ -77649,11 +11748,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv4f16_nxv4i32(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -77661,177 +11759,15 @@ define @test_vloxseg7_mask_nxv4f16_nxv4i32(half* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} 
@llvm.riscv.vloxseg8.nxv4f16.nxv16i16(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv16i16(,,,,,,,, half*, , , i32) - -define @test_vloxseg8_nxv4f16_nxv16i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv4f16_nxv16i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv1i8(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv1i8(,,,,,,,, half*, , , i32) - -define @test_vloxseg8_nxv4f16_nxv1i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4f16_nxv1i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv16i8(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv16i8(,,,,,,,, half*, , , i32) - -define @test_vloxseg8_nxv4f16_nxv16i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, 
a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4f16_nxv16i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv2i32(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv2i32(,,,,,,,, half*, , , i32) - -define @test_vloxseg8_nxv4f16_nxv2i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4f16_nxv2i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i16(half*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16(,,,,,,,, half*, , , i32) @@ -77848,131 +11784,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv4f16_nxv4i16(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv4f16_nxv4i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: 
vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv32i16(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv32i16(,,,,,,,, half*, , , i32) - -define @test_vloxseg8_nxv4f16_nxv32i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4f16_nxv32i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv1i32(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv1i32(,,,,,,,, half*, , , i32) - -define @test_vloxseg8_nxv4f16_nxv1i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4f16_nxv1i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv8i16(half*, , 
i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv8i16(,,,,,,,, half*, , , i32) - -define @test_vloxseg8_nxv4f16_nxv8i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4f16_nxv8i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -77981,137 +11796,15 @@ define @test_vloxseg8_mask_nxv4f16_nxv8i16(half* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv8i8(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv8i8(,,,,,,,, half*, , , i32) - -define @test_vloxseg8_nxv4f16_nxv8i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv8i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv4f16_nxv8i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv8i32(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv8i32(,,,,,,,, half*, , , i32) - -define @test_vloxseg8_nxv4f16_nxv8i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define 
@test_vloxseg8_mask_nxv4f16_nxv8i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv64i8(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv64i8(,,,,,,,, half*, , , i32) - -define @test_vloxseg8_nxv4f16_nxv64i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4f16_nxv64i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i8(half*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8(,,,,,,,, half*, , , i32) @@ -78128,230 +11821,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv4f16_nxv4i8(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv4f16_nxv4i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, 
(a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv1i16(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv1i16(,,,,,,,, half*, , , i32) - -define @test_vloxseg8_nxv4f16_nxv1i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv1i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv4f16_nxv1i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv1i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv32i8(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv32i8(,,,,,,,, half*, , , i32) - -define @test_vloxseg8_nxv4f16_nxv32i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4f16_nxv32i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} 
@llvm.riscv.vloxseg8.mask.nxv4f16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv2i8(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv2i8(,,,,,,,, half*, , , i32) - -define @test_vloxseg8_nxv4f16_nxv2i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4f16_nxv2i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv2i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv16i32(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv16i32(,,,,,,,, half*, , , i32) - -define @test_vloxseg8_nxv4f16_nxv16i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4f16_nxv16i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv2i16(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv2i16(,,,,,,,, half*, , , i32) - -define @test_vloxseg8_nxv4f16_nxv2i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
a1, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv4f16_nxv2i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv2i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i32(half*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32(,,,,,,,, half*, , , i32) @@ -78368,130 +11858,25 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv4f16_nxv4i32(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv4f16_nxv4i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 +; CHECK-NEXT: vmv1r.v v12, v8 +; CHECK-NEXT: vmv1r.v v13, v12 +; CHECK-NEXT: vmv1r.v v14, v12 +; CHECK-NEXT: vmv1r.v v15, v12 +; CHECK-NEXT: vmv1r.v v16, v12 +; CHECK-NEXT: vmv1r.v v17, v12 +; CHECK-NEXT: vmv1r.v v18, v12 +; CHECK-NEXT: vmv1r.v v19, v12 ; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv16i16(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv16i16(,, half*, , , i32) - -define @test_vloxseg2_nxv2f16_nxv16i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f16_nxv16i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; 
CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv16i16( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv1i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv1i8(,, half*, , , i32) - -define @test_vloxseg2_nxv2f16_nxv1i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f16_nxv1i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv1i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv16i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv16i8(,, half*, , , i32) - -define @test_vloxseg2_nxv2f16_nxv16i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f16_nxv16i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv16i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i32(half*, , i32) @@ -78510,364 +11895,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2f16_nxv2i32(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv2i32: ; CHECK: # 
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v7, v8
 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
+; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
+; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.nxv2f16.nxv2i32(half* %base, <vscale x 2 x i32> %index, i32 %vl)
- %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
- %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
- %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
- ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.nxv2f16.nxv4i16(half*, <vscale x 4 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv4i16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 4 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vloxseg2_nxv2f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.nxv2f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
+ %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl)
 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
 ret <vscale x 2 x half> %1
 }

-define <vscale x 2 x half> @test_vloxseg2_mask_nxv2f16_nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.nxv2f16.nxv4i16(half* %base, <vscale x 4 x i16> %index, i32 %vl)
- %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
- %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv4i16(<vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 4 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
- %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
- ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.nxv2f16.nxv32i16(half*, <vscale x 32 x i16>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv32i16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 32 x i16>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vloxseg2_nxv2f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.nxv2f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
- %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
- ret <vscale x 2 x half> %1
-}
-
-define <vscale x 2 x half> @test_vloxseg2_mask_nxv2f16_nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.nxv2f16.nxv32i16(half* %base, <vscale x 32 x i16> %index, i32 %vl)
- %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
- %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv32i16(<vscale x 2 x half> %1, <vscale x 2 x half> %1, half* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl)
- %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
- ret <vscale x 2 x half> %3
-}
-
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.nxv2f16.nxv1i32(half*, <vscale x 1 x i32>, i32)
-declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv1i32(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 1 x i32>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x half> @test_vloxseg2_nxv2f16_nxv1i32(half* %base, <vscale x 1 x i32> %index, i32 %vl) {
-;
CHECK-LABEL: test_vloxseg2_nxv2f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f16_nxv1i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv1i32( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv8i16(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv8i16(,, half*, , , i32) - -define @test_vloxseg2_nxv2f16_nxv8i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f16_nxv8i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv8i16( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv8i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv8i8(,, half*, , , i32) - -define @test_vloxseg2_nxv2f16_nxv8i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f16_nxv8i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv8i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} 
%2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv8i32(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv8i32(,, half*, , , i32) - -define @test_vloxseg2_nxv2f16_nxv8i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f16_nxv8i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv8i32( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv64i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv64i8(,, half*, , , i32) - -define @test_vloxseg2_nxv2f16_nxv64i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f16_nxv64i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv64i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv4i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv4i8(,, half*, , , i32) - -define @test_vloxseg2_nxv2f16_nxv4i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f16_nxv4i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv2f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv4i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv1i16(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv1i16(,, half*, , , i32) - -define @test_vloxseg2_nxv2f16_nxv1i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv1i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f16_nxv1i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv1i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv1i16( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv32i8(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv32i8(,, half*, , , i32) - -define @test_vloxseg2_nxv2f16_nxv32i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv2f16_nxv32i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv32i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i8(half*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8(,, half*, , , i32) @@ -78884,58 +11925,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2f16_nxv2i8(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 
killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv16i32(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv16i32(,, half*, , , i32) - -define @test_vloxseg2_nxv2f16_nxv16i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv16i32(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8( %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv2f16_nxv16i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv16i32( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i16(half*, , i32) declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16(,, half*, , , i32) @@ -78952,163 +11955,20 @@ entry: ret %1 } -define @test_vloxseg2_mask_nxv2f16_nxv2i16(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v7, v8 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 +; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t +; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv2f16.nxv4i32(half*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv4i32(,, half*, , , i32) - -define @test_vloxseg2_nxv2f16_nxv4i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv4i32(half* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16( %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv2f16_nxv4i32(half* %base, %index, i32 %vl, %mask) { 
-; CHECK-LABEL: test_vloxseg2_mask_nxv2f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f16.nxv4i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv4i32( %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv16i16(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv16i16(,,, half*, , , i32) - -define @test_vloxseg3_nxv2f16_nxv16i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f16_nxv16i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv16i16( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv1i8(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv1i8(,,, half*, , , i32) - -define @test_vloxseg3_nxv2f16_nxv1i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f16_nxv1i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv1i8( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv16i8(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv16i8(,,, half*, , , i32) - -define @test_vloxseg3_nxv2f16_nxv16i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, 
e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f16_nxv16i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv16i8( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i32(half*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32(,,, half*, , , i32) @@ -79125,375 +11985,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2f16_nxv2i32(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv4i16(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv4i16(,,, half*, , , i32) - -define @test_vloxseg3_nxv2f16_nxv4i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv4i16(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2f16_nxv4i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv4i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv4i16( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv32i16(half*, , 
i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv32i16(,,, half*, , , i32) - -define @test_vloxseg3_nxv2f16_nxv32i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f16_nxv32i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv32i16( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv1i32(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv1i32(,,, half*, , , i32) - -define @test_vloxseg3_nxv2f16_nxv1i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f16_nxv1i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv1i32( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv8i16(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv8i16(,,, half*, , , i32) - -define @test_vloxseg3_nxv2f16_nxv8i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f16_nxv8i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: 
vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv8i16( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv8i8(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv8i8(,,, half*, , , i32) - -define @test_vloxseg3_nxv2f16_nxv8i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f16_nxv8i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv8i8( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv8i32(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv8i32(,,, half*, , , i32) - -define @test_vloxseg3_nxv2f16_nxv8i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f16_nxv8i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv8i32( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv64i8(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv64i8(,,, half*, , , i32) - -define @test_vloxseg3_nxv2f16_nxv64i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f16_nxv64i8(half* %base, 
%index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv64i8( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv4i8(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv4i8(,,, half*, , , i32) - -define @test_vloxseg3_nxv2f16_nxv4i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f16_nxv4i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv4i8( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv1i16(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv1i16(,,, half*, , , i32) - -define @test_vloxseg3_nxv2f16_nxv1i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv1i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f16_nxv1i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv1i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv1i16( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv32i8(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv32i8(,,, half*, , , i32) - -define @test_vloxseg3_nxv2f16_nxv32i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv32i8: -; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 1 - ret %1 -} - -define @test_vloxseg3_mask_nxv2f16_nxv32i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv32i8( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i8(half*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8(,,, half*, , , i32) @@ -79510,60 +12017,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2f16_nxv2i8(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei8.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv16i32(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv16i32(,,, half*, , , i32) - -define @test_vloxseg3_nxv2f16_nxv16i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv16i32(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2f16_nxv16i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv16i32( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - declare 
{,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i16(half*, , i32) declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16(,,, half*, , , i32) @@ -79580,168 +12049,22 @@ entry: ret %1 } -define @test_vloxseg3_mask_nxv2f16_nxv2i16(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg3_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg3ei16.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv4i32(half*, , i32) -declare {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv4i32(,,, half*, , , i32) - -define @test_vloxseg3_nxv2f16_nxv4i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv4i32(half* %base, %index, i32 %vl) + %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,} %0, 1 ret %1 } -define @test_vloxseg3_mask_nxv2f16_nxv4i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg3ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f16.nxv4i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,} %0, 0 - %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv4i32( %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv16i16(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv16i16(,,,, half*, , , i32) - -define @test_vloxseg4_nxv2f16_nxv16i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f16_nxv16i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; 
CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv16i16( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv1i8(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv1i8(,,,, half*, , , i32) - -define @test_vloxseg4_nxv2f16_nxv1i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f16_nxv1i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv1i8( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv16i8(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv16i8(,,,, half*, , , i32) - -define @test_vloxseg4_nxv2f16_nxv16i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f16_nxv16i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv16i8( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i32(half*, , i32) declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32(,,,, half*, , , i32) @@ -79758,386 +12081,23 @@ entry: ret %1 } -define @test_vloxseg4_mask_nxv2f16_nxv2i32(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg4_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, 
v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t +; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v9, v0.t ; CHECK-NEXT: vmv1r.v v8, v2 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv4i16(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv4i16(,,,, half*, , , i32) - -define @test_vloxseg4_nxv2f16_nxv4i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv4i16(half* %base, %index, i32 %vl) + %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,} %0, 1 ret %1 } -define @test_vloxseg4_mask_nxv2f16_nxv4i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv4i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv4i16( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv32i16(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv32i16(,,,, half*, , , i32) - -define @test_vloxseg4_nxv2f16_nxv32i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f16_nxv32i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv32i16( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv1i32(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv1i32(,,,, half*, , , i32) - 
-define @test_vloxseg4_nxv2f16_nxv1i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f16_nxv1i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv1i32( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv8i16(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv8i16(,,,, half*, , , i32) - -define @test_vloxseg4_nxv2f16_nxv8i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f16_nxv8i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 0 - %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv8i16( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,} %2, 1 - ret %3 -} - -declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv8i8(half*, , i32) -declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv8i8(,,,, half*, , , i32) - -define @test_vloxseg4_nxv2f16_nxv8i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,} %0, 1 - ret %1 -} - -define @test_vloxseg4_mask_nxv2f16_nxv8i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, 
v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv8i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv8i8( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv8i32(half*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv8i32(,,,, half*, , , i32)
-
-define @test_vloxseg4_nxv2f16_nxv8i32(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv8i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv2f16_nxv8i32(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv8i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv8i32( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv64i8(half*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv64i8(,,,, half*, , , i32)
-
-define @test_vloxseg4_nxv2f16_nxv64i8(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv64i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv2f16_nxv64i8(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv64i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv64i8( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv4i8(half*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv4i8(,,,, half*, , , i32)
-
-define @test_vloxseg4_nxv2f16_nxv4i8(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv4i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv2f16_nxv4i8(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv4i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv4i8( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv1i16(half*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv1i16(,,,, half*, , , i32)
-
-define @test_vloxseg4_nxv2f16_nxv1i16(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv1i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv2f16_nxv1i16(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv1i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv1i16( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv32i8(half*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv32i8(,,,, half*, , , i32)
-
-define @test_vloxseg4_nxv2f16_nxv32i8(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv32i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv2f16_nxv32i8(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv32i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv32i8( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
 declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i8(half*, , i32)
 declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8(,,,, half*, , , i32)
@@ -80154,62 +12114,23 @@ entry:
 ret %1
 }
-define @test_vloxseg4_mask_nxv2f16_nxv2i8(half* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg4_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg4ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv16i32(half*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv16i32(,,,, half*, , , i32)
-
-define @test_vloxseg4_nxv2f16_nxv16i32(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv16i32(half* %base, %index, i32 %vl)
+ %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,} %0, 1
 ret %1
 }
-define @test_vloxseg4_mask_nxv2f16_nxv16i32(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv16i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv16i32( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
 declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i16(half*, , i32)
 declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16(,,,, half*, , , i32)
@@ -80226,173 +12147,23 @@ entry:
 ret %1
 }
-define @test_vloxseg4_mask_nxv2f16_nxv2i16(half* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg4_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg4ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv2i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv4i32(half*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv4i32(,,,, half*, , , i32)
-
-define @test_vloxseg4_nxv2f16_nxv4i32(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv4i32(half* %base, %index, i32 %vl)
+ %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,} %0, 1
 ret %1
 }
-define @test_vloxseg4_mask_nxv2f16_nxv4i32(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f16.nxv4i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv4i32( %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv16i16(half*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv16i16(,,,,, half*, , , i32)
-
-define @test_vloxseg5_nxv2f16_nxv16i16(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv16i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2f16_nxv16i16(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv16i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv16i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv1i8(half*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv1i8(,,,,, half*, , , i32)
-
-define @test_vloxseg5_nxv2f16_nxv1i8(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv1i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2f16_nxv1i8(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv1i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv1i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv16i8(half*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv16i8(,,,,, half*, , , i32)
-
-define @test_vloxseg5_nxv2f16_nxv16i8(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv16i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2f16_nxv16i8(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv16i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv16i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
 declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i32(half*, , i32)
 declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32(,,,,, half*, , , i32)
@@ -80409,397 +12180,24 @@ entry:
 ret %1
 }
-define @test_vloxseg5_mask_nxv2f16_nxv2i32(half* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg5_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vmv1r.v v5, v1
 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv4i16(half*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv4i16(,,,,, half*, , , i32)
-
-define @test_vloxseg5_nxv2f16_nxv4i16(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv4i16(half* %base, %index, i32 %vl)
+ %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,,} %0, 1
 ret %1
 }
-define @test_vloxseg5_mask_nxv2f16_nxv4i16(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv4i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv4i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv32i16(half*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv32i16(,,,,, half*, , , i32)
-
-define @test_vloxseg5_nxv2f16_nxv32i16(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv32i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2f16_nxv32i16(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv32i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv32i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv1i32(half*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv1i32(,,,,, half*, , , i32)
-
-define @test_vloxseg5_nxv2f16_nxv1i32(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv1i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2f16_nxv1i32(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv1i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv1i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv8i16(half*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv8i16(,,,,, half*, , , i32)
-
-define @test_vloxseg5_nxv2f16_nxv8i16(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv8i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2f16_nxv8i16(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv8i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv8i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv8i8(half*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv8i8(,,,,, half*, , , i32)
-
-define @test_vloxseg5_nxv2f16_nxv8i8(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv8i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2f16_nxv8i8(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv8i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv8i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv8i32(half*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv8i32(,,,,, half*, , , i32)
-
-define @test_vloxseg5_nxv2f16_nxv8i32(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv8i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2f16_nxv8i32(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv8i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv8i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv64i8(half*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv64i8(,,,,, half*, , , i32)
-
-define @test_vloxseg5_nxv2f16_nxv64i8(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv64i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2f16_nxv64i8(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv64i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv64i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv4i8(half*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv4i8(,,,,, half*, , , i32)
-
-define @test_vloxseg5_nxv2f16_nxv4i8(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv4i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2f16_nxv4i8(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv4i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv4i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv1i16(half*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv1i16(,,,,, half*, , , i32)
-
-define @test_vloxseg5_nxv2f16_nxv1i16(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv1i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2f16_nxv1i16(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv1i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv1i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv32i8(half*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv32i8(,,,,, half*, , , i32)
-
-define @test_vloxseg5_nxv2f16_nxv32i8(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv32i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg5_mask_nxv2f16_nxv32i8(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv32i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv32i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
 declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i8(half*, , i32)
 declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8(,,,,, half*, , , i32)
@@ -80816,64 +12214,24 @@ entry:
 ret %1
 }
-define @test_vloxseg5_mask_nxv2f16_nxv2i8(half* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg5_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vmv1r.v v5, v1
 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg5ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv16i32(half*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv16i32(,,,,, half*, , , i32)
-
-define @test_vloxseg5_nxv2f16_nxv16i32(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv16i32(half* %base, %index, i32 %vl)
+ %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,,} %0, 1
 ret %1
 }
-define @test_vloxseg5_mask_nxv2f16_nxv16i32(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv16i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv16i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
 declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i16(half*, , i32)
 declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16(,,,,, half*, , , i32)
@@ -80890,178 +12248,24 @@ entry:
 ret %1
 }
-define @test_vloxseg5_mask_nxv2f16_nxv2i16(half* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg5_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vmv1r.v v5, v1
 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg5ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv2i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv4i32(half*, , i32)
-declare {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv4i32(,,,,, half*, , , i32)
-
-define @test_vloxseg5_nxv2f16_nxv4i32(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv4i32(half* %base, %index, i32 %vl)
+ %0 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,,} %0, 1
 ret %1
 }
-define @test_vloxseg5_mask_nxv2f16_nxv4i32(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg5ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f16.nxv4i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,} %0, 0
- %2 = tail call {,,,,} @llvm.riscv.vloxseg5.mask.nxv2f16.nxv4i32( %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv16i16(half*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv16i16(,,,,,, half*, , , i32)
-
-define @test_vloxseg6_nxv2f16_nxv16i16(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv16i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2f16_nxv16i16(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv16i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv16i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv1i8(half*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv1i8(,,,,,, half*, , , i32)
-
-define @test_vloxseg6_nxv2f16_nxv1i8(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv1i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2f16_nxv1i8(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv1i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv1i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv16i8(half*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv16i8(,,,,,, half*, , , i32)
-
-define @test_vloxseg6_nxv2f16_nxv16i8(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv16i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2f16_nxv16i8(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv16i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv16i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
 declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i32(half*, , i32)
 declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32(,,,,,, half*, , , i32)
@@ -81078,408 +12282,25 @@ entry:
 ret %1
 }
-define @test_vloxseg6_mask_nxv2f16_nxv2i32(half* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg6_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vmv1r.v v5, v1
 ; CHECK-NEXT: vmv1r.v v6, v1
 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv4i16(half*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv4i16(,,,,,, half*, , , i32)
-
-define @test_vloxseg6_nxv2f16_nxv4i16(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv4i16(half* %base, %index, i32 %vl)
+ %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,,,} %0, 1
 ret %1
 }
-define @test_vloxseg6_mask_nxv2f16_nxv4i16(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv4i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv4i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv32i16(half*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv32i16(,,,,,, half*, , , i32)
-
-define @test_vloxseg6_nxv2f16_nxv32i16(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv32i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2f16_nxv32i16(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv32i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv32i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv1i32(half*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv1i32(,,,,,, half*, , , i32)
-
-define @test_vloxseg6_nxv2f16_nxv1i32(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv1i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2f16_nxv1i32(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv1i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv1i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv8i16(half*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv8i16(,,,,,, half*, , , i32)
-
-define @test_vloxseg6_nxv2f16_nxv8i16(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv8i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2f16_nxv8i16(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv8i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv8i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv8i8(half*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv8i8(,,,,,, half*, , , i32)
-
-define @test_vloxseg6_nxv2f16_nxv8i8(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv8i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2f16_nxv8i8(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv8i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv8i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv8i32(half*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv8i32(,,,,,, half*, , , i32)
-
-define @test_vloxseg6_nxv2f16_nxv8i32(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv8i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2f16_nxv8i32(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv8i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv8i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv64i8(half*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv64i8(,,,,,, half*, , , i32)
-
-define @test_vloxseg6_nxv2f16_nxv64i8(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv64i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2f16_nxv64i8(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv64i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv64i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv4i8(half*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv4i8(,,,,,, half*, , , i32)
-
-define @test_vloxseg6_nxv2f16_nxv4i8(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv4i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2f16_nxv4i8(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv4i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv4i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv1i16(half*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv1i16(,,,,,, half*, , , i32)
-
-define @test_vloxseg6_nxv2f16_nxv1i16(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv1i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2f16_nxv1i16(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv1i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv1i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv32i8(half*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv32i8(,,,,,, half*, , , i32)
-
-define @test_vloxseg6_nxv2f16_nxv32i8(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv32i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg6_mask_nxv2f16_nxv32i8(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv32i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv32i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
 declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i8(half*, , i32)
 declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8(,,,,,, half*, , , i32)
@@ -81496,66 +12317,25 @@ entry:
 ret %1
 }
-define @test_vloxseg6_mask_nxv2f16_nxv2i8(half* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg6_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vmv1r.v v5, v1
 ; CHECK-NEXT: vmv1r.v v6, v1
 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg6ei8.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv16i32(half*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv16i32(,,,,,, half*, , , i32)
-
-define @test_vloxseg6_nxv2f16_nxv16i32(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv16i32(half* %base, %index, i32 %vl)
+ %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,,,} %0, 1
 ret %1
 }
-define @test_vloxseg6_mask_nxv2f16_nxv16i32(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv16i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv16i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
 declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i16(half*, , i32)
 declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16(,,,,,, half*, , , i32)
@@ -81572,183 +12352,25 @@ entry:
 ret %1
 }
-define @test_vloxseg6_mask_nxv2f16_nxv2i16(half* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg6_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
 ; CHECK-NEXT: vmv1r.v v5, v1
 ; CHECK-NEXT: vmv1r.v v6, v1
 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg6ei16.v v1, (a0), v9, v0.t
 ; CHECK-NEXT: vmv1r.v v8, v2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv2i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv4i32(half*, , i32)
-declare {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv4i32(,,,,,, half*, , , i32)
-
-define @test_vloxseg6_nxv2f16_nxv4i32(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv4i32(half* %base, %index, i32 %vl)
+ %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,,,} %0, 1
 ret %1
 }
-define @test_vloxseg6_mask_nxv2f16_nxv4i32(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg6ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f16.nxv4i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,} %0, 0
- %2 = tail call {,,,,,} @llvm.riscv.vloxseg6.mask.nxv2f16.nxv4i32( %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv16i16(half*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv16i16(,,,,,,, half*, , , i32)
-
-define @test_vloxseg7_nxv2f16_nxv16i16(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv16i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg7_mask_nxv2f16_nxv16i16(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv16i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 0
- %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv1i8(half*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv1i8(,,,,,,, half*, , , i32)
-
-define @test_vloxseg7_nxv2f16_nxv1i8(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv1i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg7_mask_nxv2f16_nxv1i8(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv1i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 0
- %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv16i8(half*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv16i8(,,,,,,, half*, , , i32)
-
-define @test_vloxseg7_nxv2f16_nxv16i8(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv16i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg7_mask_nxv2f16_nxv16i8(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv16i8(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 0
- %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,} %2, 1
- ret %3
-}
-
 declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i32(half*, , i32)
 declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32(,,,,,,, half*, , , i32)
@@ -81765,11 +12387,10 @@ entry:
 ret %1
 }
-define @test_vloxseg7_mask_nxv2f16_nxv2i32(half* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg7_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8
+; CHECK-NEXT: vmv1r.v v1, v8
 ; CHECK-NEXT: vmv1r.v v2, v1
 ; CHECK-NEXT: vmv1r.v v3, v1
 ; CHECK-NEXT: vmv1r.v v4, v1
@@ -81777,407 +12398,15 @@ define @test_vloxseg7_mask_nxv2f16_nxv2i32(half* %base,
 ,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 0
- %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv4i16(half*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv4i16(,,,,,,, half*, , , i32)
-
-define @test_vloxseg7_nxv2f16_nxv4i16(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv4i16(half* %base, %index, i32 %vl)
+ %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,,,,} %0, 1
 ret %1
 }
-define @test_vloxseg7_mask_nxv2f16_nxv4i16(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv4i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 0
- %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv32i16(half*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv32i16(,,,,,,, half*, , , i32)
-
-define @test_vloxseg7_nxv2f16_nxv32i16(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv32i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg7_mask_nxv2f16_nxv32i16(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv32i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 0
- %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv1i32(half*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv1i32(,,,,,,, half*, , , i32)
-
-define @test_vloxseg7_nxv2f16_nxv1i32(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv1i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg7_mask_nxv2f16_nxv1i32(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv1i32(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 0
- %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv8i16(half*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv8i16(,,,,,,, half*, , , i32)
-
-define @test_vloxseg7_nxv2f16_nxv8i16(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv1r.v v8, v1
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv8i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg7_mask_nxv2f16_nxv8i16(half* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8
-; CHECK-NEXT: vmv1r.v v2, v1
-; CHECK-NEXT: vmv1r.v v3, v1
-; CHECK-NEXT: vmv1r.v v4, v1
-; CHECK-NEXT: vmv1r.v v5, v1
-; CHECK-NEXT: vmv1r.v v6, v1
-; CHECK-NEXT: vmv1r.v v7, v1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
-; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t
-; CHECK-NEXT: vmv1r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv8i16(half* %base, %index, i32 %vl)
- %1 = extractvalue {,,,,,,} %0, 0
- %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,,,,} %2, 1
- ret %3
-}
-
-declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv8i8(half*, , i32)
-declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv8i8(,,,,,,, half*, , , i32)
-
-define @test_vloxseg7_nxv2f16_nxv8i8(half* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
-; CHECK-NEXT: vloxseg7ei8.v v0, (a0), 
v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f16_nxv8i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv8i32(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv8i32(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv2f16_nxv8i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f16_nxv8i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv64i8(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv64i8(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv2f16_nxv64i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f16_nxv64i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, 
v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv4i8(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv4i8(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv2f16_nxv4i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f16_nxv4i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv1i16(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv1i16(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv2f16_nxv1i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv1i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f16_nxv1i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv1i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv32i8(half*, , i32) -declare {,,,,,,} 
@llvm.riscv.vloxseg7.mask.nxv2f16.nxv32i8(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv2f16_nxv32i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg7_mask_nxv2f16_nxv32i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i8(half*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8(,,,,,,, half*, , , i32) @@ -82194,11 +12423,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2f16_nxv2i8(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei8.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -82206,56 +12434,15 @@ define @test_vloxseg7_mask_nxv2f16_nxv2i8(half* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv16i32(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv16i32(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv2f16_nxv16i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv16i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv2f16_nxv16i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; 
CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i16(half*, , i32) declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16(,,,,,,, half*, , , i32) @@ -82272,11 +12459,10 @@ entry: ret %1 } -define @test_vloxseg7_mask_nxv2f16_nxv2i16(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg7_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei16.v v1, (a0), v8 +; CHECK-NEXT: vmv1r.v v1, v8 ; CHECK-NEXT: vmv1r.v v2, v1 ; CHECK-NEXT: vmv1r.v v3, v1 ; CHECK-NEXT: vmv1r.v v4, v1 @@ -82284,176 +12470,15 @@ define @test_vloxseg7_mask_nxv2f16_nxv2i16(half* %base, ,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv4i32(half*, , i32) -declare {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv4i32(,,,,,,, half*, , , i32) - -define @test_vloxseg7_nxv2f16_nxv4i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv4i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,} %0, 1 ret %1 } -define @test_vloxseg7_mask_nxv2f16_nxv4i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8 -; CHECK-NEXT: vmv1r.v v2, v1 -; CHECK-NEXT: vmv1r.v v3, v1 -; CHECK-NEXT: vmv1r.v v4, v1 -; CHECK-NEXT: vmv1r.v v5, v1 -; CHECK-NEXT: vmv1r.v v6, v1 -; CHECK-NEXT: vmv1r.v v7, v1 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg7ei32.v v1, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f16.nxv4i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,} %0, 0 - %2 = tail call {,,,,,,} @llvm.riscv.vloxseg7.mask.nxv2f16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv16i16(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv16i16(,,,,,,,, half*, , , i32) - -define @test_vloxseg8_nxv2f16_nxv16i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: 
vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f16_nxv16i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv16i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv16i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv1i8(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv1i8(,,,,,,,, half*, , , i32) - -define @test_vloxseg8_nxv2f16_nxv1i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f16_nxv1i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv1i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv1i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv16i8(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv16i8(,,,,,,,, half*, , , i32) - -define @test_vloxseg8_nxv2f16_nxv16i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f16_nxv16i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v11, v10 -; CHECK-NEXT: 
vmv1r.v v12, v10 -; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v11 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv16i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv16i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i32(half*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32(,,,,,,,, half*, , , i32) @@ -82470,171 +12495,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2f16_nxv2i32(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv2f16_nxv2i32( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv4i16(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv4i16(,,,,,,,, half*, , , i32) - -define @test_vloxseg8_nxv2f16_nxv4i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv4i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f16_nxv4i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv4i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv4i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv4i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv32i16(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv32i16(,,,,,,,, half*, , 
, i32) - -define @test_vloxseg8_nxv2f16_nxv32i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f16_nxv32i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv32i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv32i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv1i32(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv1i32(,,,,,,,, half*, , , i32) - -define @test_vloxseg8_nxv2f16_nxv1i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f16_nxv1i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv1i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv1i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv8i16(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv8i16(,,,,,,,, half*, , , i32) - -define @test_vloxseg8_nxv2f16_nxv8i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define 
@test_vloxseg8_mask_nxv2f16_nxv8i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv8i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -82643,257 +12507,15 @@ define @test_vloxseg8_mask_nxv2f16_nxv8i16(half* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv8i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv8i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv8i8(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv8i8(,,,,,,,, half*, , , i32) - -define @test_vloxseg8_nxv2f16_nxv8i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv8i8(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv2f16_nxv8i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv8i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv8i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv8i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv8i32(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv8i32(,,,,,,,, half*, , , i32) - -define @test_vloxseg8_nxv2f16_nxv8i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f16_nxv8i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv8i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: 
vloxseg8ei32.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv8i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv8i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv64i8(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv64i8(,,,,,,,, half*, , , i32) - -define @test_vloxseg8_nxv2f16_nxv64i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f16_nxv64i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv64i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv64i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv64i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv4i8(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv4i8(,,,,,,,, half*, , , i32) - -define @test_vloxseg8_nxv2f16_nxv4i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f16_nxv4i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv4i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv4i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv4i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv1i16(half*, , i32) -declare 
{,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv1i16(,,,,,,,, half*, , , i32) - -define @test_vloxseg8_nxv2f16_nxv1i16(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv1i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f16_nxv1i16(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv1i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv1i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv1i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv32i8(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv32i8(,,,,,,,, half*, , , i32) - -define @test_vloxseg8_nxv2f16_nxv32i8(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f16_nxv32i8(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv32i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v13, v12 -; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v12, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v13 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv32i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv32i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i8(half*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8(,,,,,,,, half*, , , i32) @@ -82910,70 +12532,27 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2f16_nxv2i8(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv2f16_nxv2i8( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; 
CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vmv1r.v v11, v10 +; CHECK-NEXT: vmv1r.v v12, v10 +; CHECK-NEXT: vmv1r.v v13, v10 +; CHECK-NEXT: vmv1r.v v14, v10 +; CHECK-NEXT: vmv1r.v v15, v10 +; CHECK-NEXT: vmv1r.v v16, v10 +; CHECK-NEXT: vmv1r.v v17, v10 ; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t +; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i8(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv16i32(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv16i32(,,,,,,,, half*, , , i32) - -define @test_vloxseg8_nxv2f16_nxv16i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv16i32(half* %base, %index, i32 %vl) + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) %1 = extractvalue {,,,,,,,} %0, 1 ret %1 } -define @test_vloxseg8_mask_nxv2f16_nxv16i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv16i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v17, v16 -; CHECK-NEXT: vmv1r.v v18, v16 -; CHECK-NEXT: vmv1r.v v19, v16 -; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, v17 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv16i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv16i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i16(half*, , i32) declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16(,,,,,,,, half*, , , i32) @@ -82990,51 +12569,10 @@ entry: ret %1 } -define @test_vloxseg8_mask_nxv2f16_nxv2i16(half* %base, %index, i32 %vl, %mask) { +define @test_vloxseg8_mask_nxv2f16_nxv2i16( %val, half* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v10, v9 -; CHECK-NEXT: vmv1r.v v11, v9 -; CHECK-NEXT: vmv1r.v v12, v9 -; CHECK-NEXT: vmv1r.v v13, v9 -; CHECK-NEXT: vmv1r.v v14, v9 -; CHECK-NEXT: vmv1r.v v15, v9 -; CHECK-NEXT: vmv1r.v v16, v9 -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu -; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8, v0.t -; CHECK-NEXT: vmv1r.v v8, 
v10 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv2i16(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv4i32(half*, , i32) -declare {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv4i32(,,,,,,,, half*, , , i32) - -define @test_vloxseg8_nxv2f16_nxv4i32(half* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 -; CHECK-NEXT: ret -entry: - %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv4i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 1 - ret %1 -} - -define @test_vloxseg8_mask_nxv2f16_nxv4i32(half* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv4i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu -; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 +; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 @@ -83043,151 +12581,13 @@ define @test_vloxseg8_mask_nxv2f16_nxv4i32(half* %base, ,,,,,,,} @llvm.riscv.vloxseg8.nxv2f16.nxv4i32(half* %base, %index, i32 %vl) - %1 = extractvalue {,,,,,,,} %0, 0 - %2 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv4i32( %1, %1, %1, %1, %1, %1, %1, %1, half* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,,,,,,,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv16i16(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv16i16(,, float*, , , i32) - -define @test_vloxseg2_nxv4f32_nxv16i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv16i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f32_nxv16i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv16i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv16i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv16i16( %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv1i8(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv1i8(,, float*, , , i32) - -define @test_vloxseg2_nxv4f32_nxv1i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv1i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define 
@test_vloxseg2_mask_nxv4f32_nxv1i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv1i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv1i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv1i8( %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv16i8(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv16i8(,, float*, , , i32) - -define @test_vloxseg2_nxv4f32_nxv16i8(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv16i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f32_nxv16i8(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv16i8: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv16i8(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv16i8( %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv2i32(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv2i32(,, float*, , , i32) - -define @test_vloxseg2_nxv4f32_nxv2i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv2i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f32_nxv2i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv2i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv2i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv2i32( %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 + %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i32 %vl) + %1 = extractvalue {,,,,,,,} %0, 1 + ret %1 } declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i16(float*, , i32) @@ -83206,228 +12606,20 @@ entry: ret %1 } -define 
@test_vloxseg2_mask_nxv4f32_nxv4i16(float* %base, %index, i32 %vl, %mask) { +define @test_vloxseg2_mask_nxv4f32_nxv4i16( %val, float* %base, %index, i32 %vl, %mask) { ; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v8 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 +; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t +; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16( %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv32i16(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv32i16(,, float*, , , i32) - -define @test_vloxseg2_nxv4f32_nxv32i16(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv32i16(float* %base, %index, i32 %vl) + %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16( %val, %val, float* %base, %index, %mask, i32 %vl) %1 = extractvalue {,} %0, 1 ret %1 } -define @test_vloxseg2_mask_nxv4f32_nxv32i16(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv32i16: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv32i16(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv32i16( %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = extractvalue {,} %2, 1 - ret %3 -} - -declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv1i32(float*, , i32) -declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv1i32(,, float*, , , i32) - -define @test_vloxseg2_nxv4f32_nxv1i32(float* %base, %index, i32 %vl) { -; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv1i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 1 - ret %1 -} - -define @test_vloxseg2_mask_nxv4f32_nxv1i32(float* %base, %index, i32 %vl, %mask) { -; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv1i32: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8 -; CHECK-NEXT: vmv2r.v v4, v2 -; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu -; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t -; CHECK-NEXT: vmv2r.v v8, v4 -; CHECK-NEXT: ret -entry: - %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv1i32(float* %base, %index, i32 %vl) - %1 = extractvalue {,} %0, 0 - %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv1i32( %1, %1, float* %base, %index, %mask, i32 %vl) - %3 = 
extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv8i16(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv8i16(,, float*, , , i32)
-
-define @test_vloxseg2_nxv4f32_nxv8i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv8i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4f32_nxv8i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv8i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv8i16( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv8i8(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv8i8(,, float*, , , i32)
-
-define @test_vloxseg2_nxv4f32_nxv8i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv8i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4f32_nxv8i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv8i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv8i8( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv8i32(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv8i32(,, float*, , , i32)
-
-define @test_vloxseg2_nxv4f32_nxv8i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv8i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4f32_nxv8i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv8i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv8i32( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv64i8(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv64i8(,, float*, , , i32)
-
-define @test_vloxseg2_nxv4f32_nxv64i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv64i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4f32_nxv64i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv64i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv64i8( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
 declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i8(float*, , i32)
 declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8(,, float*, , , i32)

@@ -83444,194 +12636,20 @@ entry:
 ret %1
 }

-define @test_vloxseg2_mask_nxv4f32_nxv4i8(float* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg2_mask_nxv4f32_nxv4i8( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v8
 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv1i16(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv1i16(,, float*, , , i32)
-
-define @test_vloxseg2_nxv4f32_nxv1i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv1i16(float* %base, %index, i32 %vl)
+ %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8( %val, %val, float* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,} %0, 1
 ret %1
 }

-define @test_vloxseg2_mask_nxv4f32_nxv1i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv1i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv1i16( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv32i8(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv32i8(,, float*, , , i32)
-
-define @test_vloxseg2_nxv4f32_nxv32i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv32i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4f32_nxv32i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv32i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv32i8( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv2i8(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv2i8(,, float*, , , i32)
-
-define @test_vloxseg2_nxv4f32_nxv2i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv2i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4f32_nxv2i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv2i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv2i8( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv16i32(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv16i32(,, float*, , , i32)
-
-define @test_vloxseg2_nxv4f32_nxv16i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv16i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4f32_nxv16i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv16i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv16i32( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv2i16(float*, , i32)
-declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv2i16(,, float*, , , i32)
-
-define @test_vloxseg2_nxv4f32_nxv2i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv2i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 1
- ret %1
-}
-
-define @test_vloxseg2_mask_nxv4f32_nxv2i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv2i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv2i16( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
 declare {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i32(float*, , i32)
 declare {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32(,, float*, , , i32)

@@ -83648,164 +12666,20 @@ entry:
 ret %1
 }

-define @test_vloxseg2_mask_nxv4f32_nxv4i32(float* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg2_mask_nxv4f32_nxv4i32( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg2_mask_nxv4f32_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
+; CHECK-NEXT: vmv2r.v v6, v8
 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg2ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
+; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t
+; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,} %0, 0
- %2 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32( %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv16i16(float*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv16i16(,,, float*, , , i32)
-
-define @test_vloxseg3_nxv4f32_nxv16i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv16i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
+ %0 = tail call {,} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32( %val, %val, float* %base, %index, %mask, i32 %vl)
+ %1 = extractvalue {,} %0, 1
 ret %1
 }

-define @test_vloxseg3_mask_nxv4f32_nxv16i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv16i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv16i16( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv1i8(float*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv1i8(,,, float*, , , i32)
-
-define @test_vloxseg3_nxv4f32_nxv1i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv1i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4f32_nxv1i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv1i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv1i8( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv16i8(float*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv16i8(,,, float*, , , i32)
-
-define @test_vloxseg3_nxv4f32_nxv16i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv16i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4f32_nxv16i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv16i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv16i8( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv2i32(float*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv2i32(,,, float*, , , i32)
-
-define @test_vloxseg3_nxv4f32_nxv2i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv2i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4f32_nxv2i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv2i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv2i32( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
 declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i16(float*, , i32)
 declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16(,,, float*, , , i32)

@@ -83822,235 +12696,22 @@ entry:
 ret %1
 }

-define @test_vloxseg3_mask_nxv4f32_nxv4i16(float* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg3_mask_nxv4f32_nxv4i16( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8
+; CHECK-NEXT: vmv2r.v v2, v8
 ; CHECK-NEXT: vmv2r.v v4, v2
 ; CHECK-NEXT: vmv2r.v v6, v2
 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v10, v0.t
 ; CHECK-NEXT: vmv2r.v v8, v4
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv32i16(float*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv32i16(,,, float*, , , i32)
-
-define @test_vloxseg3_nxv4f32_nxv32i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv32i16(float* %base, %index, i32 %vl)
+ %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,} %0, 1
 ret %1
 }

-define @test_vloxseg3_mask_nxv4f32_nxv32i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv32i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv32i16( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv1i32(float*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv1i32(,,, float*, , , i32)
-
-define @test_vloxseg3_nxv4f32_nxv1i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv1i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4f32_nxv1i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv1i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv1i32( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv8i16(float*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv8i16(,,, float*, , , i32)
-
-define @test_vloxseg3_nxv4f32_nxv8i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv8i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4f32_nxv8i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv8i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv8i16( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv8i8(float*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv8i8(,,, float*, , , i32)
-
-define @test_vloxseg3_nxv4f32_nxv8i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv8i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4f32_nxv8i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv8i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv8i8( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv8i32(float*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv8i32(,,, float*, , , i32)
-
-define @test_vloxseg3_nxv4f32_nxv8i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv8i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4f32_nxv8i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv8i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv8i32( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv64i8(float*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv64i8(,,, float*, , , i32)
-
-define @test_vloxseg3_nxv4f32_nxv64i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv64i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4f32_nxv64i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv64i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv64i8( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
 declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i8(float*, , i32)
 declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8(,,, float*, , , i32)

@@ -84067,200 +12728,22 @@ entry:
 ret %1
 }

-define @test_vloxseg3_mask_nxv4f32_nxv4i8(float* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg3_mask_nxv4f32_nxv4i8( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8
+; CHECK-NEXT: vmv2r.v v2, v8
 ; CHECK-NEXT: vmv2r.v v4, v2
 ; CHECK-NEXT: vmv2r.v v6, v2
 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v10, v0.t
 ; CHECK-NEXT: vmv2r.v v8, v4
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv1i16(float*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv1i16(,,, float*, , , i32)
-
-define @test_vloxseg3_nxv4f32_nxv1i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv1i16(float* %base, %index, i32 %vl)
+ %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,} %0, 1
 ret %1
 }

-define @test_vloxseg3_mask_nxv4f32_nxv1i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv1i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv1i16( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv32i8(float*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv32i8(,,, float*, , , i32)
-
-define @test_vloxseg3_nxv4f32_nxv32i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv32i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4f32_nxv32i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv32i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv32i8( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv2i8(float*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv2i8(,,, float*, , , i32)
-
-define @test_vloxseg3_nxv4f32_nxv2i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv2i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4f32_nxv2i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei8.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv2i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv2i8( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv16i32(float*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv16i32(,,, float*, , , i32)
-
-define @test_vloxseg3_nxv4f32_nxv16i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv16i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4f32_nxv16i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv16i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv16i32( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv2i16(float*, , i32)
-declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv2i16(,,, float*, , , i32)
-
-define @test_vloxseg3_nxv4f32_nxv2i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv2i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg3_mask_nxv4f32_nxv2i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8
-; CHECK-NEXT: vmv2r.v v4, v2
-; CHECK-NEXT: vmv2r.v v6, v2
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei16.v v2, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v4
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv2i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv2i16( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
 declare {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i32(float*, , i32)
 declare {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32(,,, float*, , , i32)

@@ -84277,169 +12760,22 @@ entry:
 ret %1
 }

-define @test_vloxseg3_mask_nxv4f32_nxv4i32(float* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg3_mask_nxv4f32_nxv4i32( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8
+; CHECK-NEXT: vmv2r.v v2, v8
 ; CHECK-NEXT: vmv2r.v v4, v2
 ; CHECK-NEXT: vmv2r.v v6, v2
 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg3ei32.v v2, (a0), v10, v0.t
 ; CHECK-NEXT: vmv2r.v v8, v4
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,} %0, 0
- %2 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32( %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv16i16(float*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv16i16(,,,, float*, , , i32)
-
-define @test_vloxseg4_nxv4f32_nxv16i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv16i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
+ %0 = tail call {,,} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, %mask, i32 %vl)
+ %1 = extractvalue {,,} %0, 1
 ret %1
 }

-define @test_vloxseg4_mask_nxv4f32_nxv16i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv16i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv16i16( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv1i8(float*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv1i8(,,,, float*, , , i32)
-
-define @test_vloxseg4_nxv4f32_nxv1i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv1i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4f32_nxv1i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv1i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv1i8( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv16i8(float*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv16i8(,,,, float*, , , i32)
-
-define @test_vloxseg4_nxv4f32_nxv16i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv16i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4f32_nxv16i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv16i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv16i8( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv2i32(float*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv2i32(,,,, float*, , , i32)
-
-define @test_vloxseg4_nxv4f32_nxv2i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv2i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4f32_nxv2i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv2i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv2i32( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
 declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i16(float*, , i32)
 declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16(,,,, float*, , , i32)

@@ -84456,242 +12792,23 @@ entry:
 ret %1
 }

-define @test_vloxseg4_mask_nxv4f32_nxv4i16(float* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg4_mask_nxv4f32_nxv4i16( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv32i16(float*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv32i16(,,,, float*, , , i32)
-
-define @test_vloxseg4_nxv4f32_nxv32i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv32i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4f32_nxv32i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv32i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv32i16( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv1i32(float*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv1i32(,,,, float*, , , i32)
-
-define @test_vloxseg4_nxv4f32_nxv1i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv1i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4f32_nxv1i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv1i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv1i32( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv8i16(float*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv8i16(,,,, float*, , , i32)
-
-define @test_vloxseg4_nxv4f32_nxv8i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv8i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4f32_nxv8i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv8i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv8i16( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv8i8(float*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv8i8(,,,, float*, , , i32)
-
-define @test_vloxseg4_nxv4f32_nxv8i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv8i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4f32_nxv8i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv8i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv8i8( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv8i32(float*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv8i32(,,,, float*, , , i32)
-
-define @test_vloxseg4_nxv4f32_nxv8i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv8i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4f32_nxv8i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8
+; CHECK-NEXT: vmv2r.v v12, v8
 ; CHECK-NEXT: vmv2r.v v14, v12
 ; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv8i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv8i32( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv64i8(float*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv64i8(,,,, float*, , , i32)
-
-define @test_vloxseg4_nxv4f32_nxv64i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv64i8(float* %base, %index, i32 %vl)
+ %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,} %0, 1
 ret %1
 }

-define @test_vloxseg4_mask_nxv4f32_nxv64i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v16, (a0), v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v16, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv64i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv64i8( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
 declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i8(float*, , i32)
 declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8(,,,, float*, , , i32)

@@ -84708,206 +12825,23 @@ entry:
 ret %1
 }

-define @test_vloxseg4_mask_nxv4f32_nxv4i8(float* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg4_mask_nxv4f32_nxv4i8( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv1i16(float*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv1i16(,,,, float*, , , i32)
-
-define @test_vloxseg4_nxv4f32_nxv1i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv1i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4f32_nxv1i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv1i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv1i16( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv32i8(float*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv32i8(,,,, float*, , , i32)
-
-define @test_vloxseg4_nxv4f32_nxv32i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv32i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4f32_nxv32i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v8
+; CHECK-NEXT: vmv2r.v v12, v8
 ; CHECK-NEXT: vmv2r.v v14, v12
 ; CHECK-NEXT: vmv2r.v v16, v12
 ; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v8, v0.t
+; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv32i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv32i8( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv2i8(float*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv2i8(,,,, float*, , , i32)
-
-define @test_vloxseg4_nxv4f32_nxv2i8(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv2i8(float* %base, %index, i32 %vl)
+ %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
 %1 = extractvalue {,,,} %0, 1
 ret %1
 }

-define @test_vloxseg4_mask_nxv4f32_nxv2i8(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv2i8(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv2i8( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv16i32(float*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv16i32(,,,, float*, , , i32)
-
-define @test_vloxseg4_nxv4f32_nxv16i32(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv16i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4f32_nxv16i32(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv16i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv16i32( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
-declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv2i16(float*, , i32)
-declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv2i16(,,,, float*, , , i32)
-
-define @test_vloxseg4_nxv4f32_nxv2i16(float* %base, %index, i32 %vl) {
-; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv2i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 1
- ret %1
-}
-
-define @test_vloxseg4_mask_nxv4f32_nxv2i16(float* %base, %index, i32 %vl, %mask) {
-; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
-; CHECK-NEXT: ret
-entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv2i16(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv2i16( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
-}
-
 declare {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i32(float*, , i32)
 declare {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32(,,,, float*, , , i32)

@@ -84924,23 +12858,20 @@ entry:
 ret %1
 }

-define @test_vloxseg4_mask_nxv4f32_nxv4i32(float* %base, %index, i32 %vl, %mask) {
+define @test_vloxseg4_mask_nxv4f32_nxv4i32( %val, float* %base, %index, i32 %vl, %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8
-; CHECK-NEXT: vmv2r.v v12, v10
-; CHECK-NEXT: vmv2r.v v14, v10
-; CHECK-NEXT: vmv2r.v v16, v10
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v18, v12
 ; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
-; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8, v0.t
-; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vmv2r.v v8, v14
 ; CHECK-NEXT: ret
 entry:
- %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i32(float* %base, %index, i32 %vl)
- %1 = extractvalue {,,,} %0, 0
- %2 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32( %1, %1, %1, %1, float* %base, %index, %mask, i32 %vl)
- %3 = extractvalue {,,,} %2, 1
- ret %3
+ %0 = tail call {,,,} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, %mask, i32 %vl)
+ %1 = extractvalue {,,,} %0, 1
+ ret %1
 }