[RISCV] Guard the ISD::EXTRACT_VECTOR_ELT handling in ReplaceNodeResults against fixed vectors and non-MVT types.
The type legalizer calls this code based on the scalar result type, so we need to verify that the input vector type is a scalable vector. The vector type has also not been legalized yet when this is called, so we need to use EVT rather than MVT to query it.
commit 00c4e0a8f6 (parent ff6c84b803)
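A minimal sketch of the pattern the fix applies inside ReplaceNodeResults, assuming the surrounding function's Vec operand (this only illustrates the reasoning above; the actual change is the diff below):

    // Query the operand as an EVT first: the vector operand may still be an
    // illegal type with no MVT equivalent when the scalar result is legalized.
    EVT VecVT = Vec.getValueType();
    // Bail out on fixed-length vectors; the slidedown path below only expects
    // scalable vectors.
    if (!VecVT.isScalableVector())
      return;
    // Only after the guard is it safe to ask for a simple type, as done for
    // the getDefaultScalableVLOps call in the diff below.
    MVT SimpleVecVT = VecVT.getSimpleVT();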
@@ -2555,18 +2555,22 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
     SDLoc DL(N);
     SDValue Vec = N->getOperand(0);
     SDValue Idx = N->getOperand(1);
-    MVT VecVT = Vec.getSimpleValueType();
+    EVT VecVT = Vec.getValueType();
     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
            VecVT.getVectorElementType() == MVT::i64 &&
            "Unexpected EXTRACT_VECTOR_ELT legalization");
 
+    if (!VecVT.isScalableVector())
+      return;
+
     SDValue Slidedown = Vec;
     MVT XLenVT = Subtarget.getXLenVT();
     // Unless the index is known to be 0, we must slide the vector down to get
     // the desired element into index 0.
     if (!isNullConstant(Idx)) {
       SDValue Mask, VL;
-      std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
+      std::tie(Mask, VL) =
+          getDefaultScalableVLOps(VecVT.getSimpleVT(), DL, DAG, Subtarget);
       Slidedown = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, VecVT,
                               DAG.getUNDEF(VecVT), Vec, Idx, Mask, VL);
     }

@@ -0,0 +1,92 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+experimental-v,+experimental-zfh,+f,+d -verify-machineinstrs -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+experimental-v,+experimental-zfh,+f,+d -verify-machineinstrs -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64

; FIXME: This codegen needs to be improved. These tests previously asserted in
; ReplaceNodeResults on RV32.

define i64 @extractelt_v4i64(<4 x i64>* %x) nounwind {
; RV32-LABEL: extractelt_v4i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -64
; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
; RV32-NEXT: addi s0, sp, 64
; RV32-NEXT: andi sp, sp, -32
; RV32-NEXT: addi a1, zero, 8
; RV32-NEXT: vsetvli a1, a1, e32,m2,ta,mu
; RV32-NEXT: vle32.v v26, (a0)
; RV32-NEXT: vse32.v v26, (sp)
; RV32-NEXT: lw a0, 24(sp)
; RV32-NEXT: lw a1, 28(sp)
; RV32-NEXT: addi sp, s0, -64
; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 64
; RV32-NEXT: ret
;
; RV64-LABEL: extractelt_v4i64:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -64
; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
; RV64-NEXT: addi s0, sp, 64
; RV64-NEXT: andi sp, sp, -32
; RV64-NEXT: addi a1, zero, 4
; RV64-NEXT: vsetvli a1, a1, e64,m2,ta,mu
; RV64-NEXT: vle64.v v26, (a0)
; RV64-NEXT: vse64.v v26, (sp)
; RV64-NEXT: ld a0, 24(sp)
; RV64-NEXT: addi sp, s0, -64
; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 64
; RV64-NEXT: ret
  %a = load <4 x i64>, <4 x i64>* %x
  %b = extractelement <4 x i64> %a, i32 3
  ret i64 %b
}

; This uses a non-power of 2 type so that it isn't an MVT to catch an
; incorrect use of getSimpleValueType().
define i64 @extractelt_v3i64(<3 x i64>* %x) nounwind {
; RV32-LABEL: extractelt_v3i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -64
; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
; RV32-NEXT: addi s0, sp, 64
; RV32-NEXT: andi sp, sp, -32
; RV32-NEXT: addi a1, zero, 8
; RV32-NEXT: vsetvli a1, a1, e32,m2,ta,mu
; RV32-NEXT: vle32.v v26, (a0)
; RV32-NEXT: vse32.v v26, (sp)
; RV32-NEXT: lw a0, 16(sp)
; RV32-NEXT: lw a1, 20(sp)
; RV32-NEXT: addi sp, s0, -64
; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 64
; RV32-NEXT: ret
;
; RV64-LABEL: extractelt_v3i64:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -64
; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
; RV64-NEXT: addi s0, sp, 64
; RV64-NEXT: andi sp, sp, -32
; RV64-NEXT: addi a1, zero, 4
; RV64-NEXT: vsetvli a1, a1, e64,m2,ta,mu
; RV64-NEXT: vle64.v v26, (a0)
; RV64-NEXT: vse64.v v26, (sp)
; RV64-NEXT: ld a0, 16(sp)
; RV64-NEXT: addi sp, s0, -64
; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 64
; RV64-NEXT: ret
  %a = load <3 x i64>, <3 x i64>* %x
  %b = extractelement <3 x i64> %a, i32 2
  ret i64 %b
}
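As the comment in the second test notes, <3 x i64> is deliberately chosen because it is not an MVT. A minimal sketch of that distinction using the generic EVT/MVT helpers (the hasNoSimpleType helper below is hypothetical, not part of the patch or the test):

    #include "llvm/CodeGen/ValueTypes.h"
    #include "llvm/IR/LLVMContext.h"
    using namespace llvm;

    // Returns true when an i64 vector with NumElts elements has no simple MVT.
    // For NumElts == 3 this holds here, so Vec.getSimpleValueType() on such an
    // operand would assert, which is the pre-patch failure the test covers.
    static bool hasNoSimpleType(LLVMContext &Ctx, unsigned NumElts) {
      EVT VT = EVT::getVectorVT(Ctx, MVT::i64, NumElts); // e.g. v3i64
      return !VT.isSimple(); // getSimpleVT() would assert when this is true
    }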