From 9c841cb8e8838707b4b005307352d76ed930ee61 Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Thu, 11 Mar 2021 09:26:43 -0800
Subject: [PATCH] [RISCV] Support extract_vector_elt for fixed and scalable masked registers.

This uses a really simple approach of converting to an i8 vector and
extracting. This is probably not the best approach, especially if you
know the index is constant.

Other ideas:
-Store to a stack temporary using vse1, load as a scalar, and shift.
-Sort of bitcast the vector to a vector of i8, slide down the
 appropriate 8-bit element, copy it to a scalar, then shift down the
 correct bit within the 8 bits we extracted. It's not exactly clear how
 to describe such a bitcast from an i1 vector to an i8 vector within
 the type system for element counts less than 8.

Reviewed By: frasercrmck

Differential Revision: https://reviews.llvm.org/D98310
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |  13 +-
 llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll  | 129 +++++++++++
 .../RISCV/rvv/fixed-vectors-extract-i1.ll     | 217 ++++++++++++++++++
 3 files changed, 358 insertions(+), 1 deletion(-)
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 39041dfcda0e..2c51964c2780 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -419,6 +419,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::TRUNCATE, VT, Custom);
       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
+
+      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
     }
 
     for (MVT VT : IntVecVTs) {
@@ -541,6 +543,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
 
+        setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
+
         setOperationAction(ISD::LOAD, VT, Custom);
         setOperationAction(ISD::STORE, VT, Custom);
 
@@ -558,7 +562,6 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
 
         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
-        setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
 
         setOperationAction(ISD::ADD, VT, Custom);
         setOperationAction(ISD::MUL, VT, Custom);
@@ -2279,6 +2282,14 @@ SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
   MVT VecVT = Vec.getSimpleValueType();
   MVT XLenVT = Subtarget.getXLenVT();
 
+  if (VecVT.getVectorElementType() == MVT::i1) {
+    // FIXME: For now we just promote to an i8 vector and extract from that,
+    // but this is probably not optimal.
+    MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
+    Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
+    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
+  }
+
   // If this is a fixed vector, we need to convert it to a scalable vector.
   MVT ContainerVT = VecVT;
   if (VecVT.isFixedLengthVector()) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
new file mode 100644
index 000000000000..5c9328bc96e9
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
@@ -0,0 +1,129 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+
+define i1 @extractelt_nxv1i1(<vscale x 1 x i8>* %x, i64 %idx) nounwind {
+; CHECK-LABEL: extractelt_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vle8.v v25, (a0)
+; CHECK-NEXT:    vmseq.vi v0, v25, 0
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
+; CHECK-NEXT:    vsetivli a0, 1, e8,mf8,ta,mu
+; CHECK-NEXT:    vslidedown.vx v25, v25, a1
+; CHECK-NEXT:    vmv.x.s a0, v25
+; CHECK-NEXT:    ret
+  %a = load <vscale x 1 x i8>, <vscale x 1 x i8>* %x
+  %b = icmp eq <vscale x 1 x i8> %a, zeroinitializer
+  %c = extractelement <vscale x 1 x i1> %b, i64 %idx
+  ret i1 %c
+}
+
+define i1 @extractelt_nxv2i1(<vscale x 2 x i8>* %x, i64 %idx) nounwind {
+; CHECK-LABEL: extractelt_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vle8.v v25, (a0)
+; CHECK-NEXT:    vmseq.vi v0, v25, 0
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
+; CHECK-NEXT:    vsetivli a0, 1, e8,mf4,ta,mu
+; CHECK-NEXT:    vslidedown.vx v25, v25, a1
+; CHECK-NEXT:    vmv.x.s a0, v25
+; CHECK-NEXT:    ret
+  %a = load <vscale x 2 x i8>, <vscale x 2 x i8>* %x
+  %b = icmp eq <vscale x 2 x i8> %a, zeroinitializer
+  %c = extractelement <vscale x 2 x i1> %b, i64 %idx
+  ret i1 %c
+}
+
+define i1 @extractelt_nxv4i1(<vscale x 4 x i8>* %x, i64 %idx) nounwind {
+; CHECK-LABEL: extractelt_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vle8.v v25, (a0)
+; CHECK-NEXT:    vmseq.vi v0, v25, 0
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
+; CHECK-NEXT:    vsetivli a0, 1, e8,mf2,ta,mu
+; CHECK-NEXT:    vslidedown.vx v25, v25, a1
+; CHECK-NEXT:    vmv.x.s a0, v25
+; CHECK-NEXT:    ret
+  %a = load <vscale x 4 x i8>, <vscale x 4 x i8>* %x
+  %b = icmp eq <vscale x 4 x i8> %a, zeroinitializer
+  %c = extractelement <vscale x 4 x i1> %b, i64 %idx
+  ret i1 %c
+}
+
+define i1 @extractelt_nxv8i1(<vscale x 8 x i8>* %x, i64 %idx) nounwind {
+; CHECK-LABEL: extractelt_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl1r.v v25, (a0)
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmseq.vi v0, v25, 0
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
+; CHECK-NEXT:    vsetivli a0, 1, e8,m1,ta,mu
+; CHECK-NEXT:    vslidedown.vx v25, v25, a1
+; CHECK-NEXT:    vmv.x.s a0, v25
+; CHECK-NEXT:    ret
+  %a = load <vscale x 8 x i8>, <vscale x 8 x i8>* %x
+  %b = icmp eq <vscale x 8 x i8> %a, zeroinitializer
+  %c = extractelement <vscale x 8 x i1> %b, i64 %idx
+  ret i1 %c
+}
+
+define i1 @extractelt_nxv16i1(<vscale x 16 x i8>* %x, i64 %idx) nounwind {
+; CHECK-LABEL: extractelt_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl2r.v v26, (a0)
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmseq.vi v0, v26, 0
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v26, v26, 1, v0
+; CHECK-NEXT:    vsetivli a0, 1, e8,m2,ta,mu
+; CHECK-NEXT:    vslidedown.vx v26, v26, a1
+; CHECK-NEXT:    vmv.x.s a0, v26
+; CHECK-NEXT:    ret
+  %a = load <vscale x 16 x i8>, <vscale x 16 x i8>* %x
+  %b = icmp eq <vscale x 16 x i8> %a, zeroinitializer
+  %c = extractelement <vscale x 16 x i1> %b, i64 %idx
+  ret i1 %c
+}
+
+define i1 @extractelt_nxv32i1(<vscale x 32 x i8>* %x, i64 %idx) nounwind {
+; CHECK-LABEL: extractelt_nxv32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl4r.v v28, (a0)
+; CHECK-NEXT:    vsetvli a0, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vmseq.vi v0, v28, 0
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v28, v28, 1, v0
+; CHECK-NEXT:    vsetivli a0, 1, e8,m4,ta,mu
+; CHECK-NEXT:    vslidedown.vx v28, v28, a1
+; CHECK-NEXT:    vmv.x.s a0, v28
+; CHECK-NEXT:    ret
+  %a = load <vscale x 32 x i8>, <vscale x 32 x i8>* %x
+  %b = icmp eq <vscale x 32 x i8> %a, zeroinitializer
+  %c = extractelement <vscale x 32 x i1> %b, i64 %idx
+  ret i1 %c
+}
+
+define i1 @extractelt_nxv64i1(<vscale x 64 x i8>* %x, i64 %idx) nounwind {
+; CHECK-LABEL: extractelt_nxv64i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8r.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vmseq.vi v0, v8, 0
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT:    vsetivli a0, 1, e8,m8,ta,mu
+; CHECK-NEXT:    vslidedown.vx v8, v8, a1
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %a = load <vscale x 64 x i8>, <vscale x 64 x i8>* %x
+  %b = icmp eq <vscale x 64 x i8> %a, zeroinitializer
+  %c = extractelement <vscale x 64 x i1> %b, i64 %idx
+  ret i1 %c
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
new file mode 100644
index 000000000000..a0149fed306b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
@@ -0,0 +1,217 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+experimental-v,+experimental-zfh,+f,+d -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+experimental-v,+experimental-zfh,+f,+d -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+
+define i1 @extractelt_v1i1(<1 x i8>* %x, i64 %idx) nounwind {
+; CHECK-LABEL: extractelt_v1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a2, 1, e8,m1,ta,mu
+; CHECK-NEXT:    vle8.v v25, (a0)
+; CHECK-NEXT:    vmseq.vi v0, v25, 0
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
+; CHECK-NEXT:    vslidedown.vx v25, v25, a1
+; CHECK-NEXT:    vmv.x.s a0, v25
+; CHECK-NEXT:    ret
+  %a = load <1 x i8>, <1 x i8>* %x
+  %b = icmp eq <1 x i8> %a, zeroinitializer
+  %c = extractelement <1 x i1> %b, i64 %idx
+  ret i1 %c
+}
+
+define i1 @extractelt_v2i1(<2 x i8>* %x, i64 %idx) nounwind {
+; CHECK-LABEL: extractelt_v2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a2, 2, e8,m1,ta,mu
+; CHECK-NEXT:    vle8.v v25, (a0)
+; CHECK-NEXT:    vmseq.vi v0, v25, 0
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
+; CHECK-NEXT:    vsetivli a0, 1, e8,m1,ta,mu
+; CHECK-NEXT:    vslidedown.vx v25, v25, a1
+; CHECK-NEXT:    vmv.x.s a0, v25
+; CHECK-NEXT:    ret
+  %a = load <2 x i8>, <2 x i8>* %x
+  %b = icmp eq <2 x i8> %a, zeroinitializer
+  %c = extractelement <2 x i1> %b, i64 %idx
+  ret i1 %c
+}
+
+define i1 @extractelt_v4i1(<4 x i8>* %x, i64 %idx) nounwind {
+; CHECK-LABEL: extractelt_v4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a2, 4, e8,m1,ta,mu
+; CHECK-NEXT:    vle8.v v25, (a0)
+; CHECK-NEXT:    vmseq.vi v0, v25, 0
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
+; CHECK-NEXT:    vsetivli a0, 1, e8,m1,ta,mu
+; CHECK-NEXT:    vslidedown.vx v25, v25, a1
+; CHECK-NEXT:    vmv.x.s a0, v25
+; CHECK-NEXT:    ret
+  %a = load <4 x i8>, <4 x i8>* %x
+  %b = icmp eq <4 x i8> %a, zeroinitializer
+  %c = extractelement <4 x i1> %b, i64 %idx
+  ret i1 %c
+}
+
+define i1 @extractelt_v8i1(<8 x i8>* %x, i64 %idx) nounwind {
+; CHECK-LABEL: extractelt_v8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a2, 8, e8,m1,ta,mu
+; CHECK-NEXT:    vle8.v v25, (a0)
+; CHECK-NEXT:    vmseq.vi v0, v25, 0
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
+; CHECK-NEXT:    vsetivli a0, 1, e8,m1,ta,mu
+; CHECK-NEXT:    vslidedown.vx v25, v25, a1
+; CHECK-NEXT:    vmv.x.s a0, v25
+; CHECK-NEXT:    ret
+  %a = load <8 x i8>, <8 x i8>* %x
+  %b = icmp eq <8 x i8> %a, zeroinitializer
+  %c = extractelement <8 x i1> %b, i64 %idx
+  ret i1 %c
+}
+
+define i1 @extractelt_v16i1(<16 x i8>* %x, i64 %idx) nounwind {
+; CHECK-LABEL: extractelt_v16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli a2, 16, e8,m1,ta,mu
+; CHECK-NEXT:    vle8.v v25, (a0)
+; CHECK-NEXT:    vmseq.vi v0, v25, 0
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
+; CHECK-NEXT:    vsetivli a0, 1, e8,m1,ta,mu
+; CHECK-NEXT:    vslidedown.vx v25, v25, a1
+; CHECK-NEXT:    vmv.x.s a0, v25
+; CHECK-NEXT:    ret
+  %a = load <16 x i8>, <16 x i8>* %x
+  %b = icmp eq <16 x i8> %a, zeroinitializer
+  %c = extractelement <16 x i1> %b, i64 %idx
+  ret i1 %c
+}
+
+define i1 @extractelt_v32i1(<32 x i8>* %x, i64 %idx) nounwind {
+; CHECK-LABEL: extractelt_v32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a2, zero, 32
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vle8.v v26, (a0)
+; CHECK-NEXT:    vmseq.vi v0, v26, 0
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v26, v26, 1, v0
+; CHECK-NEXT:    vsetivli a0, 1, e8,m2,ta,mu
+; CHECK-NEXT:    vslidedown.vx v26, v26, a1
+; CHECK-NEXT:    vmv.x.s a0, v26
+; CHECK-NEXT:    ret
+  %a = load <32 x i8>, <32 x i8>* %x
+  %b = icmp eq <32 x i8> %a, zeroinitializer
+  %c = extractelement <32 x i1> %b, i64 %idx
+  ret i1 %c
+}
+
+define i1 @extractelt_v64i1(<64 x i8>* %x, i64 %idx) nounwind {
+; CHECK-LABEL: extractelt_v64i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a2, zero, 64
+; CHECK-NEXT:    vsetvli a2, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vle8.v v28, (a0)
+; CHECK-NEXT:    vmseq.vi v0, v28, 0
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v28, v28, 1, v0
+; CHECK-NEXT:    vsetivli a0, 1, e8,m4,ta,mu
+; CHECK-NEXT:    vslidedown.vx v28, v28, a1
+; CHECK-NEXT:    vmv.x.s a0, v28
+; CHECK-NEXT:    ret
+  %a = load <64 x i8>, <64 x i8>* %x
+  %b = icmp eq <64 x i8> %a, zeroinitializer
+  %c = extractelement <64 x i1> %b, i64 %idx
+  ret i1 %c
+}
+
+define i1 @extractelt_v128i1(<128 x i8>* %x, i64 %idx) nounwind {
+; CHECK-LABEL: extractelt_v128i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a2, zero, 128
+; CHECK-NEXT:    vsetvli a2, a2, e8,m8,ta,mu
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vmseq.vi v0, v8, 0
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT:    vsetivli a0, 1, e8,m8,ta,mu
+; CHECK-NEXT:    vslidedown.vx v8, v8, a1
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %a = load <128 x i8>, <128 x i8>* %x
+  %b = icmp eq <128 x i8> %a, zeroinitializer
+  %c = extractelement <128 x i1> %b, i64 %idx
+  ret i1 %c
+}
+
+define i1 @extractelt_v256i1(<256 x i8>* %x, i64 %idx) nounwind {
+; RV32-LABEL: extractelt_v256i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -384
+; RV32-NEXT:    sw ra, 380(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 376(sp) # 4-byte Folded Spill
+; RV32-NEXT:    addi s0, sp, 384
+; RV32-NEXT:    andi sp, sp, -128
+; RV32-NEXT:    andi a1, a1, 255
+; RV32-NEXT:    addi a2, a0, 128
+; RV32-NEXT:    addi a3, zero, 128
+; RV32-NEXT:    vsetvli a3, a3, e8,m8,ta,mu
+; RV32-NEXT:    vle8.v v8, (a0)
+; RV32-NEXT:    vle8.v v16, (a2)
+; RV32-NEXT:    mv a0, sp
+; RV32-NEXT:    add a0, a0, a1
+; RV32-NEXT:    vmseq.vi v25, v8, 0
+; RV32-NEXT:    vmseq.vi v0, v16, 0
+; RV32-NEXT:    vmv.v.i v8, 0
+; RV32-NEXT:    vmerge.vim v16, v8, 1, v0
+; RV32-NEXT:    addi a1, sp, 128
+; RV32-NEXT:    vse8.v v16, (a1)
+; RV32-NEXT:    vmv1r.v v0, v25
+; RV32-NEXT:    vmerge.vim v8, v8, 1, v0
+; RV32-NEXT:    vse8.v v8, (sp)
+; RV32-NEXT:    lb a0, 0(a0)
+; RV32-NEXT:    addi sp, s0, -384
+; RV32-NEXT:    lw s0, 376(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw ra, 380(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 384
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: extractelt_v256i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -384
+; RV64-NEXT:    sd ra, 376(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 368(sp) # 8-byte Folded Spill
+; RV64-NEXT:    addi s0, sp, 384
+; RV64-NEXT:    andi sp, sp, -128
+; RV64-NEXT:    andi a1, a1, 255
+; RV64-NEXT:    addi a2, a0, 128
+; RV64-NEXT:    addi a3, zero, 128
+; RV64-NEXT:    vsetvli a3, a3, e8,m8,ta,mu
+; RV64-NEXT:    vle8.v v8, (a0)
+; RV64-NEXT:    vle8.v v16, (a2)
+; RV64-NEXT:    mv a0, sp
+; RV64-NEXT:    add a0, a0, a1
+; RV64-NEXT:    vmseq.vi v25, v8, 0
+; RV64-NEXT:    vmseq.vi v0, v16, 0
+; RV64-NEXT:    vmv.v.i v8, 0
+; RV64-NEXT:    vmerge.vim v16, v8, 1, v0
+; RV64-NEXT:    addi a1, sp, 128
+; RV64-NEXT:    vse8.v v16, (a1)
+; RV64-NEXT:    vmv1r.v v0, v25
+; RV64-NEXT:    vmerge.vim v8, v8, 1, v0
+; RV64-NEXT:    vse8.v v8, (sp)
+; RV64-NEXT:    lb a0, 0(a0)
+; RV64-NEXT:    addi sp, s0, -384
+; RV64-NEXT:    ld s0, 368(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld ra, 376(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 384
+; RV64-NEXT:    ret
+  %a = load <256 x i8>, <256 x i8>* %x
+  %b = icmp eq <256 x i8> %a, zeroinitializer
+  %c = extractelement <256 x i1> %b, i64 %idx
+  ret i1 %c
+}
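
For reference, the first alternative sketched in the commit message (store the mask to a
stack temporary with vse1, then load a scalar and shift) reduces to ordinary bit
addressing once the mask is in memory: vse1 packs one bit per mask element, with element
i landing in bit (i % 8) of byte (i / 8). A minimal C++ sketch of the scalar half of
that idea, assuming the mask has already been stored to a byte buffer; the function name
and buffer are illustrative and not part of this patch:

#include <cstddef>
#include <cstdint>

// Illustrative only, not in the patch: extract mask element Idx from a
// buffer written by vse1, which packs element i into bit (i % 8) of
// byte (i / 8).
bool extractMaskBit(const uint8_t *MaskBytes, size_t Idx) {
  uint8_t Byte = MaskBytes[Idx / 8]; // scalar load of the containing byte
  return (Byte >> (Idx % 8)) & 1;    // shift the bit down and mask it off
}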