[RISCV] Reserve an emergency spill slot for any RVV spills

This patch addresses an issue in which fixed-length (VLS) vector RVV
code could fail to reserve an emergency spill slot for its frame index
elimination. Previously we reserved a spill slot only when
`scalable-vector` frame indices were in use, yet fixed-length codegen
uses regular-type frame indices when it needs to spill.

This patch takes the fairly brute-force approach of checking ahead of
time whether the function contains any RVV spill instructions and, if
so, reserving one slot. Note that the second RVV slot is still reserved
only for `scalable-vector` frame indices.
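
In essence, the new check walks every instruction in the function looking
for RVV spill opcodes that reference a frame index. Below is a condensed,
loop-based sketch of the hasRVVSpillWithFIs helper added in this patch; the
name functionHasRVVSpills is illustrative, and the real helper also bails
out early when the V extension is not enabled.

  // Condensed sketch of the scan (assumed to live in RISCVFrameLowering.cpp,
  // where these headers and types are available).
  #include "RISCVInstrInfo.h"
  #include "llvm/CodeGen/MachineBasicBlock.h"
  #include "llvm/CodeGen/MachineFunction.h"
  #include "llvm/CodeGen/MachineInstr.h"
  using namespace llvm;

  static bool functionHasRVVSpills(const MachineFunction &MF,
                                   const RISCVInstrInfo &TII) {
    // Any RVV spill that still refers to a frame index forces the emergency
    // scavenging slot to be reserved before frame finalization.
    for (const MachineBasicBlock &MBB : MF)
      for (const MachineInstr &MI : MBB)
        if (TII.isRVVSpill(MI, /*CheckFIs*/ true))
          return true;
    return false;
  }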

This unfortunately causes quite a bit of churn in existing tests, where
we chop and change stack offsets for spill slots.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D103269
Author: Fraser Cormack
Date:   2021-05-26 17:56:26 +01:00
Parent: ad10d965c8
Commit: 8790e85255
20 changed files with 2555 additions and 2311 deletions


@ -843,15 +843,27 @@ RISCVFrameLowering::assignRVVStackObjectOffsets(MachineFrameInfo &MFI) const {
return Offset;
}
static bool hasRVVSpillWithFIs(MachineFunction &MF, const RISCVInstrInfo &TII) {
if (!MF.getSubtarget<RISCVSubtarget>().hasStdExtV())
return false;
return any_of(MF, [&TII](const MachineBasicBlock &MBB) {
return any_of(MBB, [&TII](const MachineInstr &MI) {
return TII.isRVVSpill(MI, /*CheckFIs*/ true);
});
});
}
void RISCVFrameLowering::processFunctionBeforeFrameFinalized(
MachineFunction &MF, RegScavenger *RS) const {
const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
const RISCVRegisterInfo *RegInfo =
MF.getSubtarget<RISCVSubtarget>().getRegisterInfo();
MachineFrameInfo &MFI = MF.getFrameInfo();
const TargetRegisterClass *RC = &RISCV::GPRRegClass;
auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
int64_t RVVStackSize = assignRVVStackObjectOffsets(MFI);
RVFI->setRVVStackSize(RVVStackSize);
const RISCVInstrInfo &TII = *MF.getSubtarget<RISCVSubtarget>().getInstrInfo();
// estimateStackSize has been observed to under-estimate the final stack
// size, so give ourselves wiggle-room by checking for stack size
@ -859,7 +871,10 @@ void RISCVFrameLowering::processFunctionBeforeFrameFinalized(
// FIXME: It may be possible to craft a function with a small stack that
// still needs an emergency spill slot for branch relaxation. This case
// would currently be missed.
if (!isInt<11>(MFI.estimateStackSize(MF)) || RVVStackSize != 0) {
// RVV loads & stores have no capacity to hold the immediate address offsets
// so we must always reserve an emergency spill slot if the MachineFunction
// contains any RVV spills.
if (!isInt<11>(MFI.estimateStackSize(MF)) || hasRVVSpillWithFIs(MF, TII)) {
int RegScavFI = MFI.CreateStackObject(RegInfo->getSpillSize(*RC),
RegInfo->getSpillAlign(*RC), false);
RS->addScavengingFrameIndex(RegScavFI);


@ -1412,6 +1412,46 @@ Register RISCVInstrInfo::getVLENFactoredAmount(MachineFunction &MF,
return VL;
}
static bool isRVVWholeLoadStore(unsigned Opcode) {
switch (Opcode) {
default:
return false;
case RISCV::VS1R_V:
case RISCV::VS2R_V:
case RISCV::VS4R_V:
case RISCV::VS8R_V:
case RISCV::VL1RE8_V:
case RISCV::VL2RE8_V:
case RISCV::VL4RE8_V:
case RISCV::VL8RE8_V:
case RISCV::VL1RE16_V:
case RISCV::VL2RE16_V:
case RISCV::VL4RE16_V:
case RISCV::VL8RE16_V:
case RISCV::VL1RE32_V:
case RISCV::VL2RE32_V:
case RISCV::VL4RE32_V:
case RISCV::VL8RE32_V:
case RISCV::VL1RE64_V:
case RISCV::VL2RE64_V:
case RISCV::VL4RE64_V:
case RISCV::VL8RE64_V:
return true;
}
}
bool RISCVInstrInfo::isRVVSpill(const MachineInstr &MI, bool CheckFIs) const {
// RVV lacks any support for immediate addressing for stack addresses, so be
// conservative.
unsigned Opcode = MI.getOpcode();
if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
!isRVVWholeLoadStore(Opcode) && !isRVVSpillForZvlsseg(Opcode))
return false;
return !CheckFIs || any_of(MI.operands(), [](const MachineOperand &MO) {
return MO.isFI();
});
}
Optional<std::pair<unsigned, unsigned>>
RISCVInstrInfo::isRVVSpillForZvlsseg(unsigned Opcode) const {
switch (Opcode) {


@ -147,6 +147,11 @@ public:
MachineBasicBlock::iterator II,
const DebugLoc &DL, int64_t Amount) const;
// Returns true if the given MI is an RVV instruction opcode for which we may
// expect to see a FrameIndex operand. When CheckFIs is true, the instruction
// must contain at least one FrameIndex operand.
bool isRVVSpill(const MachineInstr &MI, bool CheckFIs) const;
Optional<std::pair<unsigned, unsigned>>
isRVVSpillForZvlsseg(unsigned Opcode) const;
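
The CheckFIs flag above reflects the two ways the new hook is used in this
patch: the pre-frame-finalization scan only cares about RVV spills that still
reference a frame index, while frame index elimination is already positioned
on such an instruction. A hedged illustration of the two call patterns,
assuming a MachineInstr MI and a RISCVInstrInfo reference TII are in scope:

  // processFunctionBeforeFrameFinalized: require a FrameIndex operand, since
  // only spills that still address the stack symbolically force the
  // emergency spill slot.
  bool ForcesEmergencySlot = TII.isRVVSpill(MI, /*CheckFIs*/ true);

  // eliminateFrameIndex: the instruction is known to use the frame index
  // being eliminated, so the operand scan is unnecessary.
  bool IsRVVSpill = TII.isRVVSpill(MI, /*CheckFIs*/ false);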


@ -156,34 +156,6 @@ bool RISCVRegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
return true;
}
static bool isRVVWholeLoadStore(unsigned Opcode) {
switch (Opcode) {
default:
return false;
case RISCV::VS1R_V:
case RISCV::VS2R_V:
case RISCV::VS4R_V:
case RISCV::VS8R_V:
case RISCV::VL1RE8_V:
case RISCV::VL2RE8_V:
case RISCV::VL4RE8_V:
case RISCV::VL8RE8_V:
case RISCV::VL1RE16_V:
case RISCV::VL2RE16_V:
case RISCV::VL4RE16_V:
case RISCV::VL8RE16_V:
case RISCV::VL1RE32_V:
case RISCV::VL2RE32_V:
case RISCV::VL4RE32_V:
case RISCV::VL8RE32_V:
case RISCV::VL1RE64_V:
case RISCV::VL2RE64_V:
case RISCV::VL4RE64_V:
case RISCV::VL8RE64_V:
return true;
}
}
void RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int SPAdj, unsigned FIOperandNum,
RegScavenger *RS) const {
@ -199,10 +171,8 @@ void RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
Register FrameReg;
StackOffset Offset =
getFrameLowering(MF)->getFrameIndexReference(MF, FrameIndex, FrameReg);
bool isRVV = RISCVVPseudosTable::getPseudoInfo(MI.getOpcode()) ||
isRVVWholeLoadStore(MI.getOpcode()) ||
TII->isRVVSpillForZvlsseg(MI.getOpcode());
if (!isRVV)
bool IsRVVSpill = TII->isRVVSpill(MI, /*CheckFIs*/ false);
if (!IsRVVSpill)
Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());
if (!isInt<32>(Offset.getFixed())) {
@ -255,7 +225,7 @@ void RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// Offset = (fixed offset, 0)
MI.getOperand(FIOperandNum)
.ChangeToRegister(FrameReg, false, false, FrameRegIsKill);
if (!isRVV)
if (!IsRVVSpill)
MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getFixed());
else {
if (Offset.getFixed()) {
@ -286,7 +256,7 @@ void RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
.addReg(FrameReg, getKillRegState(FrameRegIsKill))
.addReg(ScalableFactorRegister, RegState::Kill);
if (isRVV && Offset.getFixed()) {
if (IsRVVSpill && Offset.getFixed()) {
// Scalable load/store has no immediate argument. We need to add the
// fixed part into the load/store base address.
BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), VL)
@ -296,7 +266,7 @@ void RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// 3. Replace address register with calculated address register
MI.getOperand(FIOperandNum).ChangeToRegister(VL, false, false, true);
if (!isRVV)
if (!IsRVVSpill)
MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getFixed());
}


@ -5,12 +5,10 @@
define void @lmul1() nounwind {
; CHECK-LABEL: lmul1:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: sub sp, sp, a0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = alloca <vscale x 1 x i64>
ret void
@ -19,14 +17,12 @@ define void @lmul1() nounwind {
define void @lmul2() nounwind {
; CHECK-LABEL: lmul2:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: sub sp, sp, a0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = alloca <vscale x 2 x i64>
ret void
@ -75,7 +71,6 @@ define void @lmul8() nounwind {
define void @lmul1_and_2() nounwind {
; CHECK-LABEL: lmul1_and_2:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a0, a1, a0
@ -84,7 +79,6 @@ define void @lmul1_and_2() nounwind {
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v1 = alloca <vscale x 1 x i64>
%v2 = alloca <vscale x 2 x i64>
@ -138,7 +132,6 @@ define void @lmul1_and_4() nounwind {
define void @lmul2_and_1() nounwind {
; CHECK-LABEL: lmul2_and_1:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a0, a1, a0
@ -147,7 +140,6 @@ define void @lmul2_and_1() nounwind {
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v1 = alloca <vscale x 2 x i64>
%v2 = alloca <vscale x 1 x i64>
@ -250,18 +242,18 @@ define void @lmul4_and_2_x2_1() nounwind {
define void @gpr_and_lmul1_and_2() nounwind {
; CHECK-LABEL: gpr_and_lmul1_and_2:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -32
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: sub sp, sp, a0
; CHECK-NEXT: addi a0, zero, 3
; CHECK-NEXT: sd a0, 24(sp)
; CHECK-NEXT: sd a0, 8(sp)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 32
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%x1 = alloca i64
%v1 = alloca <vscale x 1 x i64>
@ -273,21 +265,21 @@ define void @gpr_and_lmul1_and_2() nounwind {
define void @gpr_and_lmul1_and_4() nounwind {
; CHECK-LABEL: gpr_and_lmul1_and_4:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -64
; CHECK-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
; CHECK-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
; CHECK-NEXT: addi s0, sp, 64
; CHECK-NEXT: addi sp, sp, -32
; CHECK-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; CHECK-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; CHECK-NEXT: addi s0, sp, 32
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a1, a0, 2
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: sub sp, sp, a0
; CHECK-NEXT: andi sp, sp, -32
; CHECK-NEXT: addi a0, zero, 3
; CHECK-NEXT: sd a0, 40(sp)
; CHECK-NEXT: addi sp, s0, -64
; CHECK-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 64
; CHECK-NEXT: sd a0, 8(sp)
; CHECK-NEXT: addi sp, s0, -32
; CHECK-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 32
; CHECK-NEXT: ret
%x1 = alloca i64
%v1 = alloca <vscale x 1 x i64>
@ -379,14 +371,12 @@ define void @lmul_1_2_4_8_x2_1() nounwind {
define void @masks() nounwind {
; CHECK-LABEL: masks:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: sub sp, sp, a0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v1 = alloca <vscale x 1 x i1>
%v2 = alloca <vscale x 2 x i1>

File diff suppressed because it is too large.


@ -283,56 +283,58 @@ define fastcc <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x
define fastcc <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %w) {
; LMULMAX8-LABEL: ret_v32i32_call_v32i32_v32i32_v32i32_i32:
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: addi sp, sp, -256
; LMULMAX8-NEXT: .cfi_def_cfa_offset 256
; LMULMAX8-NEXT: sd ra, 248(sp) # 8-byte Folded Spill
; LMULMAX8-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
; LMULMAX8-NEXT: addi sp, sp, -384
; LMULMAX8-NEXT: .cfi_def_cfa_offset 384
; LMULMAX8-NEXT: sd ra, 376(sp) # 8-byte Folded Spill
; LMULMAX8-NEXT: sd s0, 368(sp) # 8-byte Folded Spill
; LMULMAX8-NEXT: .cfi_offset ra, -8
; LMULMAX8-NEXT: .cfi_offset s0, -16
; LMULMAX8-NEXT: addi s0, sp, 256
; LMULMAX8-NEXT: addi s0, sp, 384
; LMULMAX8-NEXT: .cfi_def_cfa s0, 0
; LMULMAX8-NEXT: andi sp, sp, -128
; LMULMAX8-NEXT: addi a2, zero, 32
; LMULMAX8-NEXT: vsetvli zero, a2, e32,m8,ta,mu
; LMULMAX8-NEXT: vle32.v v24, (a0)
; LMULMAX8-NEXT: mv a0, sp
; LMULMAX8-NEXT: addi a0, sp, 128
; LMULMAX8-NEXT: addi a2, zero, 42
; LMULMAX8-NEXT: vse32.v v8, (sp)
; LMULMAX8-NEXT: addi a3, sp, 128
; LMULMAX8-NEXT: vse32.v v8, (a3)
; LMULMAX8-NEXT: vmv8r.v v8, v24
; LMULMAX8-NEXT: call ext3@plt
; LMULMAX8-NEXT: addi sp, s0, -256
; LMULMAX8-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT: addi sp, sp, 256
; LMULMAX8-NEXT: addi sp, s0, -384
; LMULMAX8-NEXT: ld s0, 368(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT: ld ra, 376(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT: addi sp, sp, 384
; LMULMAX8-NEXT: ret
;
; LMULMAX4-LABEL: ret_v32i32_call_v32i32_v32i32_v32i32_i32:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: addi sp, sp, -256
; LMULMAX4-NEXT: .cfi_def_cfa_offset 256
; LMULMAX4-NEXT: sd ra, 248(sp) # 8-byte Folded Spill
; LMULMAX4-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
; LMULMAX4-NEXT: addi sp, sp, -384
; LMULMAX4-NEXT: .cfi_def_cfa_offset 384
; LMULMAX4-NEXT: sd ra, 376(sp) # 8-byte Folded Spill
; LMULMAX4-NEXT: sd s0, 368(sp) # 8-byte Folded Spill
; LMULMAX4-NEXT: .cfi_offset ra, -8
; LMULMAX4-NEXT: .cfi_offset s0, -16
; LMULMAX4-NEXT: addi s0, sp, 256
; LMULMAX4-NEXT: addi s0, sp, 384
; LMULMAX4-NEXT: .cfi_def_cfa s0, 0
; LMULMAX4-NEXT: andi sp, sp, -128
; LMULMAX4-NEXT: vsetivli zero, 16, e32,m4,ta,mu
; LMULMAX4-NEXT: vle32.v v28, (a0)
; LMULMAX4-NEXT: addi a0, a0, 64
; LMULMAX4-NEXT: vle32.v v24, (a0)
; LMULMAX4-NEXT: addi a0, sp, 64
; LMULMAX4-NEXT: addi a0, sp, 192
; LMULMAX4-NEXT: vse32.v v12, (a0)
; LMULMAX4-NEXT: mv a0, sp
; LMULMAX4-NEXT: addi a0, sp, 128
; LMULMAX4-NEXT: addi a3, zero, 42
; LMULMAX4-NEXT: vse32.v v8, (sp)
; LMULMAX4-NEXT: addi a1, sp, 128
; LMULMAX4-NEXT: vse32.v v8, (a1)
; LMULMAX4-NEXT: vmv4r.v v8, v28
; LMULMAX4-NEXT: vmv4r.v v12, v24
; LMULMAX4-NEXT: call ext3@plt
; LMULMAX4-NEXT: addi sp, s0, -256
; LMULMAX4-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT: addi sp, sp, 256
; LMULMAX4-NEXT: addi sp, s0, -384
; LMULMAX4-NEXT: ld s0, 368(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT: ld ra, 376(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT: addi sp, sp, 384
; LMULMAX4-NEXT: ret
%t = call fastcc <32 x i32> @ext3(<32 x i32> %z, <32 x i32> %y, <32 x i32> %x, i32 %w, i32 42)
ret <32 x i32> %t
@ -367,13 +369,13 @@ define fastcc <32 x i32> @vector_arg_indirect_stack(i32 %0, i32 %1, i32 %2, i32
define fastcc <32 x i32> @pass_vector_arg_indirect_stack(<32 x i32> %x, <32 x i32> %y, <32 x i32> %z) {
; LMULMAX8-LABEL: pass_vector_arg_indirect_stack:
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: addi sp, sp, -256
; LMULMAX8-NEXT: .cfi_def_cfa_offset 256
; LMULMAX8-NEXT: sd ra, 248(sp) # 8-byte Folded Spill
; LMULMAX8-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
; LMULMAX8-NEXT: addi sp, sp, -384
; LMULMAX8-NEXT: .cfi_def_cfa_offset 384
; LMULMAX8-NEXT: sd ra, 376(sp) # 8-byte Folded Spill
; LMULMAX8-NEXT: sd s0, 368(sp) # 8-byte Folded Spill
; LMULMAX8-NEXT: .cfi_offset ra, -8
; LMULMAX8-NEXT: .cfi_offset s0, -16
; LMULMAX8-NEXT: addi s0, sp, 256
; LMULMAX8-NEXT: addi s0, sp, 384
; LMULMAX8-NEXT: .cfi_def_cfa s0, 0
; LMULMAX8-NEXT: andi sp, sp, -128
; LMULMAX8-NEXT: addi a0, zero, 32
@ -386,30 +388,31 @@ define fastcc <32 x i32> @pass_vector_arg_indirect_stack(<32 x i32> %x, <32 x i3
; LMULMAX8-NEXT: addi a5, zero, 5
; LMULMAX8-NEXT: addi a6, zero, 6
; LMULMAX8-NEXT: addi a7, zero, 7
; LMULMAX8-NEXT: mv t2, sp
; LMULMAX8-NEXT: addi t2, sp, 128
; LMULMAX8-NEXT: addi t3, zero, 8
; LMULMAX8-NEXT: vse32.v v8, (sp)
; LMULMAX8-NEXT: addi a0, sp, 128
; LMULMAX8-NEXT: vse32.v v8, (a0)
; LMULMAX8-NEXT: mv a0, zero
; LMULMAX8-NEXT: vmv8r.v v16, v8
; LMULMAX8-NEXT: call vector_arg_indirect_stack@plt
; LMULMAX8-NEXT: addi sp, s0, -256
; LMULMAX8-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT: addi sp, sp, 256
; LMULMAX8-NEXT: addi sp, s0, -384
; LMULMAX8-NEXT: ld s0, 368(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT: ld ra, 376(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT: addi sp, sp, 384
; LMULMAX8-NEXT: ret
;
; LMULMAX4-LABEL: pass_vector_arg_indirect_stack:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: addi sp, sp, -256
; LMULMAX4-NEXT: .cfi_def_cfa_offset 256
; LMULMAX4-NEXT: sd ra, 248(sp) # 8-byte Folded Spill
; LMULMAX4-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
; LMULMAX4-NEXT: addi sp, sp, -384
; LMULMAX4-NEXT: .cfi_def_cfa_offset 384
; LMULMAX4-NEXT: sd ra, 376(sp) # 8-byte Folded Spill
; LMULMAX4-NEXT: sd s0, 368(sp) # 8-byte Folded Spill
; LMULMAX4-NEXT: .cfi_offset ra, -8
; LMULMAX4-NEXT: .cfi_offset s0, -16
; LMULMAX4-NEXT: addi s0, sp, 256
; LMULMAX4-NEXT: addi s0, sp, 384
; LMULMAX4-NEXT: .cfi_def_cfa s0, 0
; LMULMAX4-NEXT: andi sp, sp, -128
; LMULMAX4-NEXT: addi a0, sp, 64
; LMULMAX4-NEXT: addi a0, sp, 192
; LMULMAX4-NEXT: vsetivli zero, 16, e32,m4,ta,mu
; LMULMAX4-NEXT: vmv.v.i v8, 0
; LMULMAX4-NEXT: vse32.v v8, (a0)
@ -420,18 +423,19 @@ define fastcc <32 x i32> @pass_vector_arg_indirect_stack(<32 x i32> %x, <32 x i3
; LMULMAX4-NEXT: addi a5, zero, 5
; LMULMAX4-NEXT: addi a6, zero, 6
; LMULMAX4-NEXT: addi a7, zero, 7
; LMULMAX4-NEXT: mv t2, sp
; LMULMAX4-NEXT: addi t2, sp, 128
; LMULMAX4-NEXT: addi t4, zero, 8
; LMULMAX4-NEXT: vse32.v v8, (sp)
; LMULMAX4-NEXT: addi a0, sp, 128
; LMULMAX4-NEXT: vse32.v v8, (a0)
; LMULMAX4-NEXT: mv a0, zero
; LMULMAX4-NEXT: vmv4r.v v12, v8
; LMULMAX4-NEXT: vmv4r.v v16, v8
; LMULMAX4-NEXT: vmv4r.v v20, v8
; LMULMAX4-NEXT: call vector_arg_indirect_stack@plt
; LMULMAX4-NEXT: addi sp, s0, -256
; LMULMAX4-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT: addi sp, sp, 256
; LMULMAX4-NEXT: addi sp, s0, -384
; LMULMAX4-NEXT: ld s0, 368(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT: ld ra, 376(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT: addi sp, sp, 384
; LMULMAX4-NEXT: ret
%s = call fastcc <32 x i32> @vector_arg_indirect_stack(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, i32 8)
ret <32 x i32> %s
@ -441,25 +445,31 @@ define fastcc <32 x i32> @pass_vector_arg_indirect_stack(<32 x i32> %x, <32 x i3
define fastcc <32 x i32> @vector_arg_direct_stack(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 %12, i32 %13, <32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %last) {
; LMULMAX8-LABEL: vector_arg_direct_stack:
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: addi sp, sp, -16
; LMULMAX8-NEXT: .cfi_def_cfa_offset 16
; LMULMAX8-NEXT: addi a0, zero, 32
; LMULMAX8-NEXT: vsetvli zero, a0, e32,m8,ta,mu
; LMULMAX8-NEXT: addi a0, sp, 8
; LMULMAX8-NEXT: addi a0, sp, 24
; LMULMAX8-NEXT: vle32.v v24, (a0)
; LMULMAX8-NEXT: vadd.vv v8, v8, v16
; LMULMAX8-NEXT: vadd.vv v8, v8, v24
; LMULMAX8-NEXT: addi sp, sp, 16
; LMULMAX8-NEXT: ret
;
; LMULMAX4-LABEL: vector_arg_direct_stack:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: addi sp, sp, -16
; LMULMAX4-NEXT: .cfi_def_cfa_offset 16
; LMULMAX4-NEXT: vsetivli zero, 16, e32,m4,ta,mu
; LMULMAX4-NEXT: addi a0, sp, 8
; LMULMAX4-NEXT: addi a0, sp, 24
; LMULMAX4-NEXT: vle32.v v28, (a0)
; LMULMAX4-NEXT: addi a0, sp, 72
; LMULMAX4-NEXT: addi a0, sp, 88
; LMULMAX4-NEXT: vle32.v v24, (a0)
; LMULMAX4-NEXT: vadd.vv v12, v12, v20
; LMULMAX4-NEXT: vadd.vv v8, v8, v16
; LMULMAX4-NEXT: vadd.vv v8, v8, v28
; LMULMAX4-NEXT: vadd.vv v12, v12, v24
; LMULMAX4-NEXT: addi sp, sp, 16
; LMULMAX4-NEXT: ret
%s = add <32 x i32> %x, %y
%t = add <32 x i32> %s, %z
@ -547,10 +557,13 @@ define fastcc <32 x i32> @pass_vector_arg_direct_stack(<32 x i32> %x, <32 x i32>
define fastcc <4 x i1> @vector_mask_arg_direct_stack(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 %12, i32 %13, <32 x i32> %x, <32 x i32> %y, <32 x i32> %z, <4 x i1> %m1, <4 x i1> %m2, i32 %last) {
; CHECK-LABEL: vector_mask_arg_direct_stack:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: vsetivli zero, 4, e8,mf4,ta,mu
; CHECK-NEXT: addi a0, sp, 136
; CHECK-NEXT: addi a0, sp, 152
; CHECK-NEXT: vle1.v v25, (a0)
; CHECK-NEXT: vmxor.mm v0, v0, v25
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%r = xor <4 x i1> %m1, %m2
ret <4 x i1> %r


@ -782,67 +782,69 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %
define <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %w) {
; LMULMAX8-LABEL: ret_v32i32_call_v32i32_v32i32_v32i32_i32:
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: addi sp, sp, -256
; LMULMAX8-NEXT: .cfi_def_cfa_offset 256
; LMULMAX8-NEXT: sd ra, 248(sp) # 8-byte Folded Spill
; LMULMAX8-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
; LMULMAX8-NEXT: addi sp, sp, -384
; LMULMAX8-NEXT: .cfi_def_cfa_offset 384
; LMULMAX8-NEXT: sd ra, 376(sp) # 8-byte Folded Spill
; LMULMAX8-NEXT: sd s0, 368(sp) # 8-byte Folded Spill
; LMULMAX8-NEXT: .cfi_offset ra, -8
; LMULMAX8-NEXT: .cfi_offset s0, -16
; LMULMAX8-NEXT: addi s0, sp, 256
; LMULMAX8-NEXT: addi s0, sp, 384
; LMULMAX8-NEXT: .cfi_def_cfa s0, 0
; LMULMAX8-NEXT: andi sp, sp, -128
; LMULMAX8-NEXT: addi a2, zero, 32
; LMULMAX8-NEXT: vsetvli zero, a2, e32,m8,ta,mu
; LMULMAX8-NEXT: vle32.v v24, (a0)
; LMULMAX8-NEXT: mv a0, sp
; LMULMAX8-NEXT: addi a0, sp, 128
; LMULMAX8-NEXT: addi a2, zero, 42
; LMULMAX8-NEXT: vse32.v v8, (sp)
; LMULMAX8-NEXT: addi a3, sp, 128
; LMULMAX8-NEXT: vse32.v v8, (a3)
; LMULMAX8-NEXT: vmv8r.v v8, v24
; LMULMAX8-NEXT: call ext3@plt
; LMULMAX8-NEXT: addi sp, s0, -256
; LMULMAX8-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT: addi sp, sp, 256
; LMULMAX8-NEXT: addi sp, s0, -384
; LMULMAX8-NEXT: ld s0, 368(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT: ld ra, 376(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT: addi sp, sp, 384
; LMULMAX8-NEXT: ret
;
; LMULMAX4-LABEL: ret_v32i32_call_v32i32_v32i32_v32i32_i32:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: addi sp, sp, -256
; LMULMAX4-NEXT: .cfi_def_cfa_offset 256
; LMULMAX4-NEXT: sd ra, 248(sp) # 8-byte Folded Spill
; LMULMAX4-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
; LMULMAX4-NEXT: addi sp, sp, -384
; LMULMAX4-NEXT: .cfi_def_cfa_offset 384
; LMULMAX4-NEXT: sd ra, 376(sp) # 8-byte Folded Spill
; LMULMAX4-NEXT: sd s0, 368(sp) # 8-byte Folded Spill
; LMULMAX4-NEXT: .cfi_offset ra, -8
; LMULMAX4-NEXT: .cfi_offset s0, -16
; LMULMAX4-NEXT: addi s0, sp, 256
; LMULMAX4-NEXT: addi s0, sp, 384
; LMULMAX4-NEXT: .cfi_def_cfa s0, 0
; LMULMAX4-NEXT: andi sp, sp, -128
; LMULMAX4-NEXT: vsetivli zero, 16, e32,m4,ta,mu
; LMULMAX4-NEXT: vle32.v v28, (a0)
; LMULMAX4-NEXT: addi a0, a0, 64
; LMULMAX4-NEXT: vle32.v v24, (a0)
; LMULMAX4-NEXT: addi a0, sp, 64
; LMULMAX4-NEXT: addi a0, sp, 192
; LMULMAX4-NEXT: vse32.v v12, (a0)
; LMULMAX4-NEXT: mv a0, sp
; LMULMAX4-NEXT: addi a0, sp, 128
; LMULMAX4-NEXT: addi a3, zero, 42
; LMULMAX4-NEXT: vse32.v v8, (sp)
; LMULMAX4-NEXT: addi a1, sp, 128
; LMULMAX4-NEXT: vse32.v v8, (a1)
; LMULMAX4-NEXT: vmv4r.v v8, v28
; LMULMAX4-NEXT: vmv4r.v v12, v24
; LMULMAX4-NEXT: call ext3@plt
; LMULMAX4-NEXT: addi sp, s0, -256
; LMULMAX4-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT: addi sp, sp, 256
; LMULMAX4-NEXT: addi sp, s0, -384
; LMULMAX4-NEXT: ld s0, 368(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT: ld ra, 376(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT: addi sp, sp, 384
; LMULMAX4-NEXT: ret
;
; LMULMAX2-LABEL: ret_v32i32_call_v32i32_v32i32_v32i32_i32:
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: addi sp, sp, -256
; LMULMAX2-NEXT: .cfi_def_cfa_offset 256
; LMULMAX2-NEXT: sd ra, 248(sp) # 8-byte Folded Spill
; LMULMAX2-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
; LMULMAX2-NEXT: addi sp, sp, -384
; LMULMAX2-NEXT: .cfi_def_cfa_offset 384
; LMULMAX2-NEXT: sd ra, 376(sp) # 8-byte Folded Spill
; LMULMAX2-NEXT: sd s0, 368(sp) # 8-byte Folded Spill
; LMULMAX2-NEXT: .cfi_offset ra, -8
; LMULMAX2-NEXT: .cfi_offset s0, -16
; LMULMAX2-NEXT: addi s0, sp, 256
; LMULMAX2-NEXT: addi s0, sp, 384
; LMULMAX2-NEXT: .cfi_def_cfa s0, 0
; LMULMAX2-NEXT: andi sp, sp, -128
; LMULMAX2-NEXT: vsetivli zero, 8, e32,m2,ta,mu
@ -853,24 +855,25 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x
; LMULMAX2-NEXT: vle32.v v30, (a1)
; LMULMAX2-NEXT: addi a0, a0, 96
; LMULMAX2-NEXT: vle32.v v24, (a0)
; LMULMAX2-NEXT: addi a0, sp, 96
; LMULMAX2-NEXT: addi a0, sp, 224
; LMULMAX2-NEXT: vse32.v v14, (a0)
; LMULMAX2-NEXT: addi a0, sp, 64
; LMULMAX2-NEXT: addi a0, sp, 192
; LMULMAX2-NEXT: vse32.v v12, (a0)
; LMULMAX2-NEXT: addi a0, sp, 32
; LMULMAX2-NEXT: addi a0, sp, 160
; LMULMAX2-NEXT: vse32.v v10, (a0)
; LMULMAX2-NEXT: mv a0, sp
; LMULMAX2-NEXT: addi a0, sp, 128
; LMULMAX2-NEXT: addi a5, zero, 42
; LMULMAX2-NEXT: vse32.v v8, (sp)
; LMULMAX2-NEXT: addi a1, sp, 128
; LMULMAX2-NEXT: vse32.v v8, (a1)
; LMULMAX2-NEXT: vmv2r.v v8, v26
; LMULMAX2-NEXT: vmv2r.v v10, v28
; LMULMAX2-NEXT: vmv2r.v v12, v30
; LMULMAX2-NEXT: vmv2r.v v14, v24
; LMULMAX2-NEXT: call ext3@plt
; LMULMAX2-NEXT: addi sp, s0, -256
; LMULMAX2-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
; LMULMAX2-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
; LMULMAX2-NEXT: addi sp, sp, 256
; LMULMAX2-NEXT: addi sp, s0, -384
; LMULMAX2-NEXT: ld s0, 368(sp) # 8-byte Folded Reload
; LMULMAX2-NEXT: ld ra, 376(sp) # 8-byte Folded Reload
; LMULMAX2-NEXT: addi sp, sp, 384
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: ret_v32i32_call_v32i32_v32i32_v32i32_i32:
@ -1013,13 +1016,13 @@ define <32 x i32> @split_vector_args(<2 x i32>,<2 x i32>,<2 x i32>,<2 x i32>,<2
define <32 x i32> @call_split_vector_args(<2 x i32>* %pa, <32 x i32>* %pb) {
; LMULMAX8-LABEL: call_split_vector_args:
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: addi sp, sp, -256
; LMULMAX8-NEXT: .cfi_def_cfa_offset 256
; LMULMAX8-NEXT: sd ra, 248(sp) # 8-byte Folded Spill
; LMULMAX8-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
; LMULMAX8-NEXT: addi sp, sp, -384
; LMULMAX8-NEXT: .cfi_def_cfa_offset 384
; LMULMAX8-NEXT: sd ra, 376(sp) # 8-byte Folded Spill
; LMULMAX8-NEXT: sd s0, 368(sp) # 8-byte Folded Spill
; LMULMAX8-NEXT: .cfi_offset ra, -8
; LMULMAX8-NEXT: .cfi_offset s0, -16
; LMULMAX8-NEXT: addi s0, sp, 256
; LMULMAX8-NEXT: addi s0, sp, 384
; LMULMAX8-NEXT: .cfi_def_cfa s0, 0
; LMULMAX8-NEXT: andi sp, sp, -128
; LMULMAX8-NEXT: vsetivli zero, 2, e32,mf2,ta,mu
@ -1027,28 +1030,29 @@ define <32 x i32> @call_split_vector_args(<2 x i32>* %pa, <32 x i32>* %pb) {
; LMULMAX8-NEXT: addi a0, zero, 32
; LMULMAX8-NEXT: vsetvli zero, a0, e32,m8,ta,mu
; LMULMAX8-NEXT: vle32.v v16, (a1)
; LMULMAX8-NEXT: mv a0, sp
; LMULMAX8-NEXT: vse32.v v16, (sp)
; LMULMAX8-NEXT: addi a0, sp, 128
; LMULMAX8-NEXT: addi a1, sp, 128
; LMULMAX8-NEXT: vse32.v v16, (a1)
; LMULMAX8-NEXT: vmv1r.v v9, v8
; LMULMAX8-NEXT: vmv1r.v v10, v8
; LMULMAX8-NEXT: vmv1r.v v11, v8
; LMULMAX8-NEXT: vmv1r.v v12, v8
; LMULMAX8-NEXT: call split_vector_args@plt
; LMULMAX8-NEXT: addi sp, s0, -256
; LMULMAX8-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT: addi sp, sp, 256
; LMULMAX8-NEXT: addi sp, s0, -384
; LMULMAX8-NEXT: ld s0, 368(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT: ld ra, 376(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT: addi sp, sp, 384
; LMULMAX8-NEXT: ret
;
; LMULMAX4-LABEL: call_split_vector_args:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: addi sp, sp, -256
; LMULMAX4-NEXT: .cfi_def_cfa_offset 256
; LMULMAX4-NEXT: sd ra, 248(sp) # 8-byte Folded Spill
; LMULMAX4-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
; LMULMAX4-NEXT: addi sp, sp, -384
; LMULMAX4-NEXT: .cfi_def_cfa_offset 384
; LMULMAX4-NEXT: sd ra, 376(sp) # 8-byte Folded Spill
; LMULMAX4-NEXT: sd s0, 368(sp) # 8-byte Folded Spill
; LMULMAX4-NEXT: .cfi_offset ra, -8
; LMULMAX4-NEXT: .cfi_offset s0, -16
; LMULMAX4-NEXT: addi s0, sp, 256
; LMULMAX4-NEXT: addi s0, sp, 384
; LMULMAX4-NEXT: .cfi_def_cfa s0, 0
; LMULMAX4-NEXT: andi sp, sp, -128
; LMULMAX4-NEXT: vsetivli zero, 2, e32,mf2,ta,mu
@ -1057,30 +1061,31 @@ define <32 x i32> @call_split_vector_args(<2 x i32>* %pa, <32 x i32>* %pb) {
; LMULMAX4-NEXT: vle32.v v16, (a1)
; LMULMAX4-NEXT: addi a0, a1, 64
; LMULMAX4-NEXT: vle32.v v20, (a0)
; LMULMAX4-NEXT: addi a0, sp, 64
; LMULMAX4-NEXT: addi a0, sp, 192
; LMULMAX4-NEXT: vse32.v v20, (a0)
; LMULMAX4-NEXT: mv a0, sp
; LMULMAX4-NEXT: vse32.v v16, (sp)
; LMULMAX4-NEXT: addi a0, sp, 128
; LMULMAX4-NEXT: addi a1, sp, 128
; LMULMAX4-NEXT: vse32.v v16, (a1)
; LMULMAX4-NEXT: vmv1r.v v9, v8
; LMULMAX4-NEXT: vmv1r.v v10, v8
; LMULMAX4-NEXT: vmv1r.v v11, v8
; LMULMAX4-NEXT: vmv1r.v v12, v8
; LMULMAX4-NEXT: call split_vector_args@plt
; LMULMAX4-NEXT: addi sp, s0, -256
; LMULMAX4-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT: addi sp, sp, 256
; LMULMAX4-NEXT: addi sp, s0, -384
; LMULMAX4-NEXT: ld s0, 368(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT: ld ra, 376(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT: addi sp, sp, 384
; LMULMAX4-NEXT: ret
;
; LMULMAX2-LABEL: call_split_vector_args:
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: addi sp, sp, -128
; LMULMAX2-NEXT: .cfi_def_cfa_offset 128
; LMULMAX2-NEXT: sd ra, 120(sp) # 8-byte Folded Spill
; LMULMAX2-NEXT: sd s0, 112(sp) # 8-byte Folded Spill
; LMULMAX2-NEXT: addi sp, sp, -256
; LMULMAX2-NEXT: .cfi_def_cfa_offset 256
; LMULMAX2-NEXT: sd ra, 248(sp) # 8-byte Folded Spill
; LMULMAX2-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
; LMULMAX2-NEXT: .cfi_offset ra, -8
; LMULMAX2-NEXT: .cfi_offset s0, -16
; LMULMAX2-NEXT: addi s0, sp, 128
; LMULMAX2-NEXT: addi s0, sp, 256
; LMULMAX2-NEXT: .cfi_def_cfa s0, 0
; LMULMAX2-NEXT: andi sp, sp, -128
; LMULMAX2-NEXT: vsetivli zero, 2, e32,mf2,ta,mu
@ -1093,33 +1098,34 @@ define <32 x i32> @call_split_vector_args(<2 x i32>* %pa, <32 x i32>* %pb) {
; LMULMAX2-NEXT: vle32.v v18, (a0)
; LMULMAX2-NEXT: addi a0, a1, 96
; LMULMAX2-NEXT: vle32.v v20, (a0)
; LMULMAX2-NEXT: addi a0, sp, 64
; LMULMAX2-NEXT: addi a0, sp, 192
; LMULMAX2-NEXT: vse32.v v20, (a0)
; LMULMAX2-NEXT: addi a0, sp, 32
; LMULMAX2-NEXT: addi a0, sp, 160
; LMULMAX2-NEXT: vse32.v v18, (a0)
; LMULMAX2-NEXT: mv a0, sp
; LMULMAX2-NEXT: vse32.v v16, (sp)
; LMULMAX2-NEXT: addi a0, sp, 128
; LMULMAX2-NEXT: addi a1, sp, 128
; LMULMAX2-NEXT: vse32.v v16, (a1)
; LMULMAX2-NEXT: vmv1r.v v9, v8
; LMULMAX2-NEXT: vmv1r.v v10, v8
; LMULMAX2-NEXT: vmv1r.v v11, v8
; LMULMAX2-NEXT: vmv1r.v v12, v8
; LMULMAX2-NEXT: vmv2r.v v22, v14
; LMULMAX2-NEXT: call split_vector_args@plt
; LMULMAX2-NEXT: addi sp, s0, -128
; LMULMAX2-NEXT: ld s0, 112(sp) # 8-byte Folded Reload
; LMULMAX2-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
; LMULMAX2-NEXT: addi sp, sp, 128
; LMULMAX2-NEXT: addi sp, s0, -256
; LMULMAX2-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
; LMULMAX2-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
; LMULMAX2-NEXT: addi sp, sp, 256
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: call_split_vector_args:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: addi sp, sp, -128
; LMULMAX1-NEXT: .cfi_def_cfa_offset 128
; LMULMAX1-NEXT: sd ra, 120(sp) # 8-byte Folded Spill
; LMULMAX1-NEXT: sd s0, 112(sp) # 8-byte Folded Spill
; LMULMAX1-NEXT: addi sp, sp, -256
; LMULMAX1-NEXT: .cfi_def_cfa_offset 256
; LMULMAX1-NEXT: sd ra, 248(sp) # 8-byte Folded Spill
; LMULMAX1-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
; LMULMAX1-NEXT: .cfi_offset ra, -8
; LMULMAX1-NEXT: .cfi_offset s0, -16
; LMULMAX1-NEXT: addi s0, sp, 128
; LMULMAX1-NEXT: addi s0, sp, 256
; LMULMAX1-NEXT: .cfi_def_cfa s0, 0
; LMULMAX1-NEXT: andi sp, sp, -128
; LMULMAX1-NEXT: vsetivli zero, 2, e32,mf2,ta,mu
@ -1140,16 +1146,17 @@ define <32 x i32> @call_split_vector_args(<2 x i32>* %pa, <32 x i32>* %pb) {
; LMULMAX1-NEXT: vle32.v v19, (a0)
; LMULMAX1-NEXT: addi a0, a1, 112
; LMULMAX1-NEXT: vle32.v v20, (a0)
; LMULMAX1-NEXT: addi a0, sp, 64
; LMULMAX1-NEXT: addi a0, sp, 192
; LMULMAX1-NEXT: vse32.v v20, (a0)
; LMULMAX1-NEXT: addi a0, sp, 48
; LMULMAX1-NEXT: addi a0, sp, 176
; LMULMAX1-NEXT: vse32.v v19, (a0)
; LMULMAX1-NEXT: addi a0, sp, 32
; LMULMAX1-NEXT: addi a0, sp, 160
; LMULMAX1-NEXT: vse32.v v18, (a0)
; LMULMAX1-NEXT: addi a0, sp, 16
; LMULMAX1-NEXT: addi a0, sp, 144
; LMULMAX1-NEXT: vse32.v v17, (a0)
; LMULMAX1-NEXT: mv a0, sp
; LMULMAX1-NEXT: vse32.v v16, (sp)
; LMULMAX1-NEXT: addi a0, sp, 128
; LMULMAX1-NEXT: addi a1, sp, 128
; LMULMAX1-NEXT: vse32.v v16, (a1)
; LMULMAX1-NEXT: vmv1r.v v9, v8
; LMULMAX1-NEXT: vmv1r.v v10, v8
; LMULMAX1-NEXT: vmv1r.v v11, v8
@ -1158,10 +1165,10 @@ define <32 x i32> @call_split_vector_args(<2 x i32>* %pa, <32 x i32>* %pb) {
; LMULMAX1-NEXT: vmv1r.v v22, v14
; LMULMAX1-NEXT: vmv1r.v v23, v15
; LMULMAX1-NEXT: call split_vector_args@plt
; LMULMAX1-NEXT: addi sp, s0, -128
; LMULMAX1-NEXT: ld s0, 112(sp) # 8-byte Folded Reload
; LMULMAX1-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
; LMULMAX1-NEXT: addi sp, sp, 128
; LMULMAX1-NEXT: addi sp, s0, -256
; LMULMAX1-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
; LMULMAX1-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
; LMULMAX1-NEXT: addi sp, sp, 256
; LMULMAX1-NEXT: ret
%a = load <2 x i32>, <2 x i32>* %pa
%b = load <32 x i32>, <32 x i32>* %pb
@ -1174,55 +1181,70 @@ define <32 x i32> @call_split_vector_args(<2 x i32>* %pa, <32 x i32>* %pb) {
define <32 x i32> @vector_arg_via_stack(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, <32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %8) {
; LMULMAX8-LABEL: vector_arg_via_stack:
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: addi sp, sp, -16
; LMULMAX8-NEXT: .cfi_def_cfa_offset 16
; LMULMAX8-NEXT: addi a0, zero, 32
; LMULMAX8-NEXT: vsetvli zero, a0, e32,m8,ta,mu
; LMULMAX8-NEXT: vle32.v v16, (sp)
; LMULMAX8-NEXT: addi a0, sp, 16
; LMULMAX8-NEXT: vle32.v v16, (a0)
; LMULMAX8-NEXT: vadd.vv v8, v8, v16
; LMULMAX8-NEXT: addi sp, sp, 16
; LMULMAX8-NEXT: ret
;
; LMULMAX4-LABEL: vector_arg_via_stack:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: addi sp, sp, -16
; LMULMAX4-NEXT: .cfi_def_cfa_offset 16
; LMULMAX4-NEXT: vsetivli zero, 16, e32,m4,ta,mu
; LMULMAX4-NEXT: vle32.v v28, (sp)
; LMULMAX4-NEXT: addi a0, sp, 64
; LMULMAX4-NEXT: addi a0, sp, 16
; LMULMAX4-NEXT: vle32.v v28, (a0)
; LMULMAX4-NEXT: addi a0, sp, 80
; LMULMAX4-NEXT: vle32.v v16, (a0)
; LMULMAX4-NEXT: vadd.vv v8, v8, v28
; LMULMAX4-NEXT: vadd.vv v12, v12, v16
; LMULMAX4-NEXT: addi sp, sp, 16
; LMULMAX4-NEXT: ret
;
; LMULMAX2-LABEL: vector_arg_via_stack:
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: addi sp, sp, -16
; LMULMAX2-NEXT: .cfi_def_cfa_offset 16
; LMULMAX2-NEXT: vsetivli zero, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vle32.v v26, (sp)
; LMULMAX2-NEXT: addi a0, sp, 32
; LMULMAX2-NEXT: addi a0, sp, 16
; LMULMAX2-NEXT: vle32.v v26, (a0)
; LMULMAX2-NEXT: addi a0, sp, 48
; LMULMAX2-NEXT: vle32.v v28, (a0)
; LMULMAX2-NEXT: addi a0, sp, 64
; LMULMAX2-NEXT: addi a0, sp, 80
; LMULMAX2-NEXT: vle32.v v30, (a0)
; LMULMAX2-NEXT: addi a0, sp, 96
; LMULMAX2-NEXT: addi a0, sp, 112
; LMULMAX2-NEXT: vle32.v v16, (a0)
; LMULMAX2-NEXT: vadd.vv v8, v8, v26
; LMULMAX2-NEXT: vadd.vv v10, v10, v28
; LMULMAX2-NEXT: vadd.vv v12, v12, v30
; LMULMAX2-NEXT: vadd.vv v14, v14, v16
; LMULMAX2-NEXT: addi sp, sp, 16
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: vector_arg_via_stack:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: addi sp, sp, -16
; LMULMAX1-NEXT: .cfi_def_cfa_offset 16
; LMULMAX1-NEXT: vsetivli zero, 4, e32,m1,ta,mu
; LMULMAX1-NEXT: addi a0, sp, 112
; LMULMAX1-NEXT: addi a0, sp, 128
; LMULMAX1-NEXT: vle32.v v25, (a0)
; LMULMAX1-NEXT: addi a0, sp, 96
; LMULMAX1-NEXT: addi a0, sp, 112
; LMULMAX1-NEXT: vle32.v v26, (a0)
; LMULMAX1-NEXT: addi a0, sp, 80
; LMULMAX1-NEXT: addi a0, sp, 96
; LMULMAX1-NEXT: vle32.v v27, (a0)
; LMULMAX1-NEXT: addi a0, sp, 64
; LMULMAX1-NEXT: addi a0, sp, 80
; LMULMAX1-NEXT: vle32.v v28, (a0)
; LMULMAX1-NEXT: vle32.v v29, (sp)
; LMULMAX1-NEXT: addi a0, sp, 16
; LMULMAX1-NEXT: vle32.v v30, (a0)
; LMULMAX1-NEXT: vle32.v v29, (a0)
; LMULMAX1-NEXT: addi a0, sp, 32
; LMULMAX1-NEXT: vle32.v v31, (a0)
; LMULMAX1-NEXT: vle32.v v30, (a0)
; LMULMAX1-NEXT: addi a0, sp, 48
; LMULMAX1-NEXT: vle32.v v31, (a0)
; LMULMAX1-NEXT: addi a0, sp, 64
; LMULMAX1-NEXT: vle32.v v16, (a0)
; LMULMAX1-NEXT: vadd.vv v8, v8, v29
; LMULMAX1-NEXT: vadd.vv v9, v9, v30
@ -1232,6 +1254,7 @@ define <32 x i32> @vector_arg_via_stack(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4,
; LMULMAX1-NEXT: vadd.vv v13, v13, v27
; LMULMAX1-NEXT: vadd.vv v14, v14, v26
; LMULMAX1-NEXT: vadd.vv v15, v15, v25
; LMULMAX1-NEXT: addi sp, sp, 16
; LMULMAX1-NEXT: ret
%s = add <32 x i32> %x, %z
ret <32 x i32> %s
@ -1392,9 +1415,12 @@ define <32 x i32> @pass_vector_arg_via_stack(<32 x i32> %x, <32 x i32> %y, <32 x
define <4 x i1> @vector_mask_arg_via_stack(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, <32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %8, <4 x i1> %9, <4 x i1> %10) {
; CHECK-LABEL: vector_mask_arg_via_stack:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: vsetivli zero, 4, e8,mf4,ta,mu
; CHECK-NEXT: addi a0, sp, 136
; CHECK-NEXT: addi a0, sp, 152
; CHECK-NEXT: vle1.v v0, (a0)
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
ret <4 x i1> %10
}

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -0,0 +1,59 @@
# NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
# RUN: llc -mtriple riscv64 -mattr=+experimental-v -start-before=prologepilog -o - \
# RUN: -verify-machineinstrs %s | FileCheck %s
--- |
target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
target triple = "riscv64"
define weak_odr dso_local void @fixedlen_vector_spillslot(i8* %ay) nounwind {
; CHECK-LABEL: fixedlen_vector_spillslot:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -48
; CHECK-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
; CHECK-NEXT: sd a0, 32(sp)
; CHECK-NEXT: sd a0, 16(sp)
; CHECK-NEXT: vsetivli a5, 1, e16,m1,ta,mu
; CHECK-NEXT: sd a1, 8(sp)
; CHECK-NEXT: addi a1, sp, 24
; CHECK-NEXT: vs1r.v v25, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: ld a1, 8(sp)
; CHECK-NEXT: call fixedlen_vector_spillslot@plt
; CHECK-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 48
; CHECK-NEXT: ret
entry:
ret void
}
...
---
name: fixedlen_vector_spillslot
alignment: 2
tracksRegLiveness: false
fixedStack: []
stack:
- { id: 0, name: '', type: default, offset: 0, size: 8, alignment: 8,
stack-id: default, callee-saved-register: '', callee-saved-restored: true,
debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
- { id: 1, name: '', type: spill-slot, offset: 0, size: 2, alignment: 8,
stack-id: default, callee-saved-register: '', callee-saved-restored: true,
debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
- { id: 2, name: '', type: default, offset: 0, size: 8, alignment: 8,
stack-id: default, callee-saved-register: '', callee-saved-restored: true,
debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
body: |
bb.0.entry:
liveins: $x1, $x5, $x6, $x7, $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17, $x28, $x29, $x30, $x31, $v25
SD $x10, %stack.0, 0
SD $x10, %stack.2, 0
dead renamable $x15 = PseudoVSETIVLI 1, 72, implicit-def $vl, implicit-def $vtype
PseudoVSPILL_M1 killed renamable $v25, %stack.1 :: (store unknown-size into %stack.1, align 8)
; This is here just to make all the eligible registers live at this point.
; This way when we replace the frame index %stack.1 with its actual address
; we have to allocate a virtual register to compute it.
; A later run of the register scavenger won't find an available register
; either so it will have to spill one to the emergency spill slot.
PseudoCALL target-flags(riscv-plt) @fixedlen_vector_spillslot, csr_ilp32_lp64, implicit-def $x1, implicit-def $x2, implicit $x1, implicit $x5, implicit $x6, implicit $x7, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $x28, implicit $x29, implicit $x30, implicit $x31
PseudoRET
...


@ -151,10 +151,10 @@ define i1 @extractelt_v128i1(<128 x i8>* %x, i64 %idx) nounwind {
define i1 @extractelt_v256i1(<256 x i8>* %x, i64 %idx) nounwind {
; RV32-LABEL: extractelt_v256i1:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -384
; RV32-NEXT: sw ra, 380(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 376(sp) # 4-byte Folded Spill
; RV32-NEXT: addi s0, sp, 384
; RV32-NEXT: addi sp, sp, -512
; RV32-NEXT: sw ra, 508(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 504(sp) # 4-byte Folded Spill
; RV32-NEXT: addi s0, sp, 512
; RV32-NEXT: andi sp, sp, -128
; RV32-NEXT: andi a1, a1, 255
; RV32-NEXT: addi a2, a0, 128
@ -162,30 +162,31 @@ define i1 @extractelt_v256i1(<256 x i8>* %x, i64 %idx) nounwind {
; RV32-NEXT: vsetvli zero, a3, e8,m8,ta,mu
; RV32-NEXT: vle8.v v8, (a0)
; RV32-NEXT: vle8.v v16, (a2)
; RV32-NEXT: mv a0, sp
; RV32-NEXT: addi a0, sp, 128
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: vmseq.vi v25, v8, 0
; RV32-NEXT: vmseq.vi v0, v16, 0
; RV32-NEXT: vmv.v.i v8, 0
; RV32-NEXT: vmerge.vim v16, v8, 1, v0
; RV32-NEXT: addi a1, sp, 128
; RV32-NEXT: addi a1, sp, 256
; RV32-NEXT: vse8.v v16, (a1)
; RV32-NEXT: vmv1r.v v0, v25
; RV32-NEXT: vmerge.vim v8, v8, 1, v0
; RV32-NEXT: vse8.v v8, (sp)
; RV32-NEXT: addi a1, sp, 128
; RV32-NEXT: vse8.v v8, (a1)
; RV32-NEXT: lb a0, 0(a0)
; RV32-NEXT: addi sp, s0, -384
; RV32-NEXT: lw s0, 376(sp) # 4-byte Folded Reload
; RV32-NEXT: lw ra, 380(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 384
; RV32-NEXT: addi sp, s0, -512
; RV32-NEXT: lw s0, 504(sp) # 4-byte Folded Reload
; RV32-NEXT: lw ra, 508(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 512
; RV32-NEXT: ret
;
; RV64-LABEL: extractelt_v256i1:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -384
; RV64-NEXT: sd ra, 376(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 368(sp) # 8-byte Folded Spill
; RV64-NEXT: addi s0, sp, 384
; RV64-NEXT: addi sp, sp, -512
; RV64-NEXT: sd ra, 504(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 496(sp) # 8-byte Folded Spill
; RV64-NEXT: addi s0, sp, 512
; RV64-NEXT: andi sp, sp, -128
; RV64-NEXT: andi a1, a1, 255
; RV64-NEXT: addi a2, a0, 128
@ -193,22 +194,23 @@ define i1 @extractelt_v256i1(<256 x i8>* %x, i64 %idx) nounwind {
; RV64-NEXT: vsetvli zero, a3, e8,m8,ta,mu
; RV64-NEXT: vle8.v v8, (a0)
; RV64-NEXT: vle8.v v16, (a2)
; RV64-NEXT: mv a0, sp
; RV64-NEXT: addi a0, sp, 128
; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: vmseq.vi v25, v8, 0
; RV64-NEXT: vmseq.vi v0, v16, 0
; RV64-NEXT: vmv.v.i v8, 0
; RV64-NEXT: vmerge.vim v16, v8, 1, v0
; RV64-NEXT: addi a1, sp, 128
; RV64-NEXT: addi a1, sp, 256
; RV64-NEXT: vse8.v v16, (a1)
; RV64-NEXT: vmv1r.v v0, v25
; RV64-NEXT: vmerge.vim v8, v8, 1, v0
; RV64-NEXT: vse8.v v8, (sp)
; RV64-NEXT: addi a1, sp, 128
; RV64-NEXT: vse8.v v8, (a1)
; RV64-NEXT: lb a0, 0(a0)
; RV64-NEXT: addi sp, s0, -384
; RV64-NEXT: ld s0, 368(sp) # 8-byte Folded Reload
; RV64-NEXT: ld ra, 376(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 384
; RV64-NEXT: addi sp, s0, -512
; RV64-NEXT: ld s0, 496(sp) # 8-byte Folded Reload
; RV64-NEXT: ld ra, 504(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 512
; RV64-NEXT: ret
%a = load <256 x i8>, <256 x i8>* %x
%b = icmp eq <256 x i8> %a, zeroinitializer


@ -35,23 +35,24 @@ define void @buildvec_no_vid_v4f32(<4 x float>* %x) {
define <4 x float> @hang_when_merging_stores_after_legalization(<8 x float> %x, <8 x float> %y) optsize {
; LMULMAX1-LABEL: hang_when_merging_stores_after_legalization:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: addi sp, sp, -16
; LMULMAX1-NEXT: .cfi_def_cfa_offset 16
; LMULMAX1-NEXT: addi sp, sp, -32
; LMULMAX1-NEXT: .cfi_def_cfa_offset 32
; LMULMAX1-NEXT: vsetvli zero, zero, e32,m2,ta,mu
; LMULMAX1-NEXT: vfmv.f.s ft0, v10
; LMULMAX1-NEXT: fsw ft0, 8(sp)
; LMULMAX1-NEXT: fsw ft0, 24(sp)
; LMULMAX1-NEXT: vfmv.f.s ft0, v8
; LMULMAX1-NEXT: fsw ft0, 0(sp)
; LMULMAX1-NEXT: fsw ft0, 16(sp)
; LMULMAX1-NEXT: vsetivli zero, 1, e32,m2,ta,mu
; LMULMAX1-NEXT: vslidedown.vi v26, v10, 7
; LMULMAX1-NEXT: vfmv.f.s ft0, v26
; LMULMAX1-NEXT: fsw ft0, 12(sp)
; LMULMAX1-NEXT: fsw ft0, 28(sp)
; LMULMAX1-NEXT: vslidedown.vi v26, v8, 7
; LMULMAX1-NEXT: vfmv.f.s ft0, v26
; LMULMAX1-NEXT: fsw ft0, 4(sp)
; LMULMAX1-NEXT: fsw ft0, 20(sp)
; LMULMAX1-NEXT: vsetivli zero, 4, e32,m1,ta,mu
; LMULMAX1-NEXT: vle32.v v8, (sp)
; LMULMAX1-NEXT: addi sp, sp, 16
; LMULMAX1-NEXT: addi a0, sp, 16
; LMULMAX1-NEXT: vle32.v v8, (a0)
; LMULMAX1-NEXT: addi sp, sp, 32
; LMULMAX1-NEXT: ret
;
; LMULMAX2-LABEL: hang_when_merging_stores_after_legalization:


@ -163,22 +163,24 @@ define void @fpround_v8f32_v8f16(<8 x float>* %x, <8 x half>* %y) {
;
; LMULMAX1-LABEL: fpround_v8f32_v8f16:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: addi sp, sp, -16
; LMULMAX1-NEXT: .cfi_def_cfa_offset 16
; LMULMAX1-NEXT: addi sp, sp, -32
; LMULMAX1-NEXT: .cfi_def_cfa_offset 32
; LMULMAX1-NEXT: vsetivli zero, 4, e32,m1,ta,mu
; LMULMAX1-NEXT: addi a2, a0, 16
; LMULMAX1-NEXT: vle32.v v25, (a2)
; LMULMAX1-NEXT: vle32.v v26, (a0)
; LMULMAX1-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
; LMULMAX1-NEXT: vfncvt.f.f.w v27, v25
; LMULMAX1-NEXT: addi a0, sp, 8
; LMULMAX1-NEXT: addi a0, sp, 24
; LMULMAX1-NEXT: vse16.v v27, (a0)
; LMULMAX1-NEXT: vfncvt.f.f.w v25, v26
; LMULMAX1-NEXT: vse16.v v25, (sp)
; LMULMAX1-NEXT: addi a0, sp, 16
; LMULMAX1-NEXT: vse16.v v25, (a0)
; LMULMAX1-NEXT: vsetivli zero, 8, e16,m1,ta,mu
; LMULMAX1-NEXT: vle16.v v25, (sp)
; LMULMAX1-NEXT: addi a0, sp, 16
; LMULMAX1-NEXT: vle16.v v25, (a0)
; LMULMAX1-NEXT: vse16.v v25, (a1)
; LMULMAX1-NEXT: addi sp, sp, 16
; LMULMAX1-NEXT: addi sp, sp, 32
; LMULMAX1-NEXT: ret
%a = load <8 x float>, <8 x float>* %x
%d = fptrunc <8 x float> %a to <8 x half>
@ -200,8 +202,8 @@ define void @fpround_v8f64_v8f16(<8 x double>* %x, <8 x half>* %y) {
;
; LMULMAX1-LABEL: fpround_v8f64_v8f16:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: addi sp, sp, -32
; LMULMAX1-NEXT: .cfi_def_cfa_offset 32
; LMULMAX1-NEXT: addi sp, sp, -48
; LMULMAX1-NEXT: .cfi_def_cfa_offset 48
; LMULMAX1-NEXT: vsetivli zero, 2, e64,m1,ta,mu
; LMULMAX1-NEXT: vle64.v v25, (a0)
; LMULMAX1-NEXT: addi a2, a0, 32
@ -214,39 +216,41 @@ define void @fpround_v8f64_v8f16(<8 x double>* %x, <8 x half>* %y) {
; LMULMAX1-NEXT: vfncvt.rod.f.f.w v29, v27
; LMULMAX1-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
; LMULMAX1-NEXT: vfncvt.f.f.w v27, v29
; LMULMAX1-NEXT: addi a0, sp, 12
; LMULMAX1-NEXT: addi a0, sp, 28
; LMULMAX1-NEXT: vse16.v v27, (a0)
; LMULMAX1-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
; LMULMAX1-NEXT: vfncvt.rod.f.f.w v27, v28
; LMULMAX1-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
; LMULMAX1-NEXT: vfncvt.f.f.w v28, v27
; LMULMAX1-NEXT: addi a0, sp, 4
; LMULMAX1-NEXT: addi a0, sp, 20
; LMULMAX1-NEXT: vse16.v v28, (a0)
; LMULMAX1-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
; LMULMAX1-NEXT: vfncvt.rod.f.f.w v27, v26
; LMULMAX1-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
; LMULMAX1-NEXT: vfncvt.f.f.w v26, v27
; LMULMAX1-NEXT: addi a0, sp, 8
; LMULMAX1-NEXT: addi a0, sp, 24
; LMULMAX1-NEXT: vse16.v v26, (a0)
; LMULMAX1-NEXT: vsetivli zero, 4, e16,mf2,ta,mu
; LMULMAX1-NEXT: addi a0, sp, 8
; LMULMAX1-NEXT: vle16.v v26, (a0)
; LMULMAX1-NEXT: addi a0, sp, 24
; LMULMAX1-NEXT: vle16.v v26, (a0)
; LMULMAX1-NEXT: addi a0, sp, 40
; LMULMAX1-NEXT: vse16.v v26, (a0)
; LMULMAX1-NEXT: vsetivli zero, 2, e32,mf2,ta,mu
; LMULMAX1-NEXT: vfncvt.rod.f.f.w v26, v25
; LMULMAX1-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
; LMULMAX1-NEXT: vfncvt.f.f.w v25, v26
; LMULMAX1-NEXT: vse16.v v25, (sp)
; LMULMAX1-NEXT: vsetivli zero, 4, e16,mf2,ta,mu
; LMULMAX1-NEXT: vle16.v v25, (sp)
; LMULMAX1-NEXT: addi a0, sp, 16
; LMULMAX1-NEXT: vse16.v v25, (a0)
; LMULMAX1-NEXT: vsetivli zero, 8, e16,m1,ta,mu
; LMULMAX1-NEXT: vsetivli zero, 4, e16,mf2,ta,mu
; LMULMAX1-NEXT: addi a0, sp, 16
; LMULMAX1-NEXT: vle16.v v25, (a0)
; LMULMAX1-NEXT: addi a0, sp, 32
; LMULMAX1-NEXT: vse16.v v25, (a0)
; LMULMAX1-NEXT: vsetivli zero, 8, e16,m1,ta,mu
; LMULMAX1-NEXT: addi a0, sp, 32
; LMULMAX1-NEXT: vle16.v v25, (a0)
; LMULMAX1-NEXT: vse16.v v25, (a1)
; LMULMAX1-NEXT: addi sp, sp, 32
; LMULMAX1-NEXT: addi sp, sp, 48
; LMULMAX1-NEXT: ret
%a = load <8 x double>, <8 x double>* %x
%d = fptrunc <8 x double> %a to <8 x half>


@ -466,8 +466,8 @@ define void @si2fp_v8i64_v8f16(<8 x i64>* %x, <8 x half>* %y) {
;
; LMULMAX1-LABEL: si2fp_v8i64_v8f16:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: addi sp, sp, -32
; LMULMAX1-NEXT: .cfi_def_cfa_offset 32
; LMULMAX1-NEXT: addi sp, sp, -48
; LMULMAX1-NEXT: .cfi_def_cfa_offset 48
; LMULMAX1-NEXT: vsetivli zero, 2, e64,m1,ta,mu
; LMULMAX1-NEXT: vle64.v v25, (a0)
; LMULMAX1-NEXT: addi a2, a0, 32
@ -480,39 +480,41 @@ define void @si2fp_v8i64_v8f16(<8 x i64>* %x, <8 x half>* %y) {
; LMULMAX1-NEXT: vfncvt.f.x.w v29, v27
; LMULMAX1-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
; LMULMAX1-NEXT: vfncvt.f.f.w v27, v29
; LMULMAX1-NEXT: addi a0, sp, 12
; LMULMAX1-NEXT: addi a0, sp, 28
; LMULMAX1-NEXT: vse16.v v27, (a0)
; LMULMAX1-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
; LMULMAX1-NEXT: vfncvt.f.x.w v27, v28
; LMULMAX1-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
; LMULMAX1-NEXT: vfncvt.f.f.w v28, v27
; LMULMAX1-NEXT: addi a0, sp, 4
; LMULMAX1-NEXT: addi a0, sp, 20
; LMULMAX1-NEXT: vse16.v v28, (a0)
; LMULMAX1-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
; LMULMAX1-NEXT: vfncvt.f.x.w v27, v26
; LMULMAX1-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
; LMULMAX1-NEXT: vfncvt.f.f.w v26, v27
; LMULMAX1-NEXT: addi a0, sp, 8
; LMULMAX1-NEXT: addi a0, sp, 24
; LMULMAX1-NEXT: vse16.v v26, (a0)
; LMULMAX1-NEXT: vsetivli zero, 4, e16,mf2,ta,mu
; LMULMAX1-NEXT: addi a0, sp, 8
; LMULMAX1-NEXT: vle16.v v26, (a0)
; LMULMAX1-NEXT: addi a0, sp, 24
; LMULMAX1-NEXT: vle16.v v26, (a0)
; LMULMAX1-NEXT: addi a0, sp, 40
; LMULMAX1-NEXT: vse16.v v26, (a0)
; LMULMAX1-NEXT: vsetivli zero, 2, e32,mf2,ta,mu
; LMULMAX1-NEXT: vfncvt.f.x.w v26, v25
; LMULMAX1-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
; LMULMAX1-NEXT: vfncvt.f.f.w v25, v26
; LMULMAX1-NEXT: vse16.v v25, (sp)
; LMULMAX1-NEXT: vsetivli zero, 4, e16,mf2,ta,mu
; LMULMAX1-NEXT: vle16.v v25, (sp)
; LMULMAX1-NEXT: addi a0, sp, 16
; LMULMAX1-NEXT: vse16.v v25, (a0)
; LMULMAX1-NEXT: vsetivli zero, 8, e16,m1,ta,mu
; LMULMAX1-NEXT: vsetivli zero, 4, e16,mf2,ta,mu
; LMULMAX1-NEXT: addi a0, sp, 16
; LMULMAX1-NEXT: vle16.v v25, (a0)
; LMULMAX1-NEXT: addi a0, sp, 32
; LMULMAX1-NEXT: vse16.v v25, (a0)
; LMULMAX1-NEXT: vsetivli zero, 8, e16,m1,ta,mu
; LMULMAX1-NEXT: addi a0, sp, 32
; LMULMAX1-NEXT: vle16.v v25, (a0)
; LMULMAX1-NEXT: vse16.v v25, (a1)
; LMULMAX1-NEXT: addi sp, sp, 32
; LMULMAX1-NEXT: addi sp, sp, 48
; LMULMAX1-NEXT: ret
%a = load <8 x i64>, <8 x i64>* %x
%d = sitofp <8 x i64> %a to <8 x half>
@ -534,8 +536,8 @@ define void @ui2fp_v8i64_v8f16(<8 x i64>* %x, <8 x half>* %y) {
;
; LMULMAX1-LABEL: ui2fp_v8i64_v8f16:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: addi sp, sp, -32
; LMULMAX1-NEXT: .cfi_def_cfa_offset 32
; LMULMAX1-NEXT: addi sp, sp, -48
; LMULMAX1-NEXT: .cfi_def_cfa_offset 48
; LMULMAX1-NEXT: vsetivli zero, 2, e64,m1,ta,mu
; LMULMAX1-NEXT: vle64.v v25, (a0)
; LMULMAX1-NEXT: addi a2, a0, 32
@ -548,39 +550,41 @@ define void @ui2fp_v8i64_v8f16(<8 x i64>* %x, <8 x half>* %y) {
; LMULMAX1-NEXT: vfncvt.f.xu.w v29, v27
; LMULMAX1-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
; LMULMAX1-NEXT: vfncvt.f.f.w v27, v29
; LMULMAX1-NEXT: addi a0, sp, 12
; LMULMAX1-NEXT: addi a0, sp, 28
; LMULMAX1-NEXT: vse16.v v27, (a0)
; LMULMAX1-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
; LMULMAX1-NEXT: vfncvt.f.xu.w v27, v28
; LMULMAX1-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
; LMULMAX1-NEXT: vfncvt.f.f.w v28, v27
; LMULMAX1-NEXT: addi a0, sp, 4
; LMULMAX1-NEXT: addi a0, sp, 20
; LMULMAX1-NEXT: vse16.v v28, (a0)
; LMULMAX1-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
; LMULMAX1-NEXT: vfncvt.f.xu.w v27, v26
; LMULMAX1-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
; LMULMAX1-NEXT: vfncvt.f.f.w v26, v27
; LMULMAX1-NEXT: addi a0, sp, 8
; LMULMAX1-NEXT: addi a0, sp, 24
; LMULMAX1-NEXT: vse16.v v26, (a0)
; LMULMAX1-NEXT: vsetivli zero, 4, e16,mf2,ta,mu
; LMULMAX1-NEXT: addi a0, sp, 8
; LMULMAX1-NEXT: vle16.v v26, (a0)
; LMULMAX1-NEXT: addi a0, sp, 24
; LMULMAX1-NEXT: vle16.v v26, (a0)
; LMULMAX1-NEXT: addi a0, sp, 40
; LMULMAX1-NEXT: vse16.v v26, (a0)
; LMULMAX1-NEXT: vsetivli zero, 2, e32,mf2,ta,mu
; LMULMAX1-NEXT: vfncvt.f.xu.w v26, v25
; LMULMAX1-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
; LMULMAX1-NEXT: vfncvt.f.f.w v25, v26
; LMULMAX1-NEXT: vse16.v v25, (sp)
; LMULMAX1-NEXT: vsetivli zero, 4, e16,mf2,ta,mu
; LMULMAX1-NEXT: vle16.v v25, (sp)
; LMULMAX1-NEXT: addi a0, sp, 16
; LMULMAX1-NEXT: vse16.v v25, (a0)
; LMULMAX1-NEXT: vsetivli zero, 8, e16,m1,ta,mu
; LMULMAX1-NEXT: vsetivli zero, 4, e16,mf2,ta,mu
; LMULMAX1-NEXT: addi a0, sp, 16
; LMULMAX1-NEXT: vle16.v v25, (a0)
; LMULMAX1-NEXT: addi a0, sp, 32
; LMULMAX1-NEXT: vse16.v v25, (a0)
; LMULMAX1-NEXT: vsetivli zero, 8, e16,m1,ta,mu
; LMULMAX1-NEXT: addi a0, sp, 32
; LMULMAX1-NEXT: vle16.v v25, (a0)
; LMULMAX1-NEXT: vse16.v v25, (a1)
; LMULMAX1-NEXT: addi sp, sp, 32
; LMULMAX1-NEXT: addi sp, sp, 48
; LMULMAX1-NEXT: ret
%a = load <8 x i64>, <8 x i64>* %x
%d = uitofp <8 x i64> %a to <8 x half>

File diff suppressed because it is too large.


@ -7,8 +7,8 @@
define <4 x i32> @load_v4i32_align1(<4 x i32>* %ptr) {
; RV32-LABEL: load_v4i32_align1:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: addi sp, sp, -32
; RV32-NEXT: .cfi_def_cfa_offset 32
; RV32-NEXT: lbu a1, 13(a0)
; RV32-NEXT: lbu a2, 12(a0)
; RV32-NEXT: lbu a3, 15(a0)
@ -19,7 +19,7 @@ define <4 x i32> @load_v4i32_align1(<4 x i32>* %ptr) {
; RV32-NEXT: or a2, a2, a4
; RV32-NEXT: slli a2, a2, 16
; RV32-NEXT: or a1, a2, a1
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a1, 28(sp)
; RV32-NEXT: lbu a1, 9(a0)
; RV32-NEXT: lbu a2, 8(a0)
; RV32-NEXT: lbu a3, 11(a0)
@ -30,7 +30,7 @@ define <4 x i32> @load_v4i32_align1(<4 x i32>* %ptr) {
; RV32-NEXT: or a2, a2, a4
; RV32-NEXT: slli a2, a2, 16
; RV32-NEXT: or a1, a2, a1
; RV32-NEXT: sw a1, 8(sp)
; RV32-NEXT: sw a1, 24(sp)
; RV32-NEXT: lbu a1, 5(a0)
; RV32-NEXT: lbu a2, 4(a0)
; RV32-NEXT: lbu a3, 7(a0)
@ -41,7 +41,7 @@ define <4 x i32> @load_v4i32_align1(<4 x i32>* %ptr) {
; RV32-NEXT: or a2, a2, a4
; RV32-NEXT: slli a2, a2, 16
; RV32-NEXT: or a1, a2, a1
; RV32-NEXT: sw a1, 4(sp)
; RV32-NEXT: sw a1, 20(sp)
; RV32-NEXT: lbu a1, 1(a0)
; RV32-NEXT: lbu a2, 0(a0)
; RV32-NEXT: lbu a3, 3(a0)
@ -52,16 +52,17 @@ define <4 x i32> @load_v4i32_align1(<4 x i32>* %ptr) {
; RV32-NEXT: or a0, a2, a0
; RV32-NEXT: slli a0, a0, 16
; RV32-NEXT: or a0, a0, a1
; RV32-NEXT: sw a0, 0(sp)
; RV32-NEXT: sw a0, 16(sp)
; RV32-NEXT: vsetivli zero, 4, e32,m1,ta,mu
; RV32-NEXT: vle32.v v8, (sp)
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vle32.v v8, (a0)
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: ret
;
; RV64-LABEL: load_v4i32_align1:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: addi sp, sp, -32
; RV64-NEXT: .cfi_def_cfa_offset 32
; RV64-NEXT: lbu a1, 9(a0)
; RV64-NEXT: lbu a2, 8(a0)
; RV64-NEXT: lbu a3, 11(a0)
@ -84,7 +85,7 @@ define <4 x i32> @load_v4i32_align1(<4 x i32>* %ptr) {
; RV64-NEXT: or a2, a3, a2
; RV64-NEXT: slli a2, a2, 32
; RV64-NEXT: or a1, a2, a1
; RV64-NEXT: sd a1, 8(sp)
; RV64-NEXT: sd a1, 24(sp)
; RV64-NEXT: lbu a1, 1(a0)
; RV64-NEXT: lbu a2, 0(a0)
; RV64-NEXT: lbu a3, 3(a0)
@ -107,10 +108,11 @@ define <4 x i32> @load_v4i32_align1(<4 x i32>* %ptr) {
; RV64-NEXT: or a0, a0, a2
; RV64-NEXT: slli a0, a0, 32
; RV64-NEXT: or a0, a0, a1
; RV64-NEXT: sd a0, 0(sp)
; RV64-NEXT: sd a0, 16(sp)
; RV64-NEXT: vsetivli zero, 4, e32,m1,ta,mu
; RV64-NEXT: vle32.v v8, (sp)
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: addi a0, sp, 16
; RV64-NEXT: vle32.v v8, (a0)
; RV64-NEXT: addi sp, sp, 32
; RV64-NEXT: ret
%z = load <4 x i32>, <4 x i32>* %ptr, align 1
ret <4 x i32> %z
@ -119,37 +121,38 @@ define <4 x i32> @load_v4i32_align1(<4 x i32>* %ptr) {
define <4 x i32> @load_v4i32_align2(<4 x i32>* %ptr) {
; RV32-LABEL: load_v4i32_align2:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: addi sp, sp, -32
; RV32-NEXT: .cfi_def_cfa_offset 32
; RV32-NEXT: lhu a1, 14(a0)
; RV32-NEXT: lhu a2, 12(a0)
; RV32-NEXT: slli a1, a1, 16
; RV32-NEXT: or a1, a1, a2
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a1, 28(sp)
; RV32-NEXT: lhu a1, 10(a0)
; RV32-NEXT: lhu a2, 8(a0)
; RV32-NEXT: slli a1, a1, 16
; RV32-NEXT: or a1, a1, a2
; RV32-NEXT: sw a1, 8(sp)
; RV32-NEXT: sw a1, 24(sp)
; RV32-NEXT: lhu a1, 6(a0)
; RV32-NEXT: lhu a2, 4(a0)
; RV32-NEXT: slli a1, a1, 16
; RV32-NEXT: or a1, a1, a2
; RV32-NEXT: sw a1, 4(sp)
; RV32-NEXT: sw a1, 20(sp)
; RV32-NEXT: lhu a1, 2(a0)
; RV32-NEXT: lhu a0, 0(a0)
; RV32-NEXT: slli a1, a1, 16
; RV32-NEXT: or a0, a1, a0
; RV32-NEXT: sw a0, 0(sp)
; RV32-NEXT: sw a0, 16(sp)
; RV32-NEXT: vsetivli zero, 4, e32,m1,ta,mu
; RV32-NEXT: vle32.v v8, (sp)
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vle32.v v8, (a0)
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: ret
;
; RV64-LABEL: load_v4i32_align2:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: addi sp, sp, -32
; RV64-NEXT: .cfi_def_cfa_offset 32
; RV64-NEXT: lhu a1, 10(a0)
; RV64-NEXT: lhu a2, 8(a0)
; RV64-NEXT: lhu a3, 14(a0)
@ -160,7 +163,7 @@ define <4 x i32> @load_v4i32_align2(<4 x i32>* %ptr) {
; RV64-NEXT: or a2, a2, a4
; RV64-NEXT: slli a2, a2, 32
; RV64-NEXT: or a1, a2, a1
; RV64-NEXT: sd a1, 8(sp)
; RV64-NEXT: sd a1, 24(sp)
; RV64-NEXT: lhu a1, 2(a0)
; RV64-NEXT: lhu a2, 0(a0)
; RV64-NEXT: lhu a3, 6(a0)
@ -171,10 +174,11 @@ define <4 x i32> @load_v4i32_align2(<4 x i32>* %ptr) {
; RV64-NEXT: or a0, a2, a0
; RV64-NEXT: slli a0, a0, 32
; RV64-NEXT: or a0, a0, a1
; RV64-NEXT: sd a0, 0(sp)
; RV64-NEXT: sd a0, 16(sp)
; RV64-NEXT: vsetivli zero, 4, e32,m1,ta,mu
; RV64-NEXT: vle32.v v8, (sp)
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: addi a0, sp, 16
; RV64-NEXT: vle32.v v8, (a0)
; RV64-NEXT: addi sp, sp, 32
; RV64-NEXT: ret
%z = load <4 x i32>, <4 x i32>* %ptr, align 2
ret <4 x i32> %z
@ -183,17 +187,18 @@ define <4 x i32> @load_v4i32_align2(<4 x i32>* %ptr) {
define void @store_v4i32_align1(<4 x i32> %x, <4 x i32>* %ptr) {
; RV32-LABEL: store_v4i32_align1:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: addi sp, sp, -32
; RV32-NEXT: .cfi_def_cfa_offset 32
; RV32-NEXT: vsetivli zero, 4, e32,m1,ta,mu
; RV32-NEXT: vse32.v v8, (sp)
; RV32-NEXT: lw a1, 12(sp)
; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vse32.v v8, (a1)
; RV32-NEXT: lw a1, 28(sp)
; RV32-NEXT: sb a1, 12(a0)
; RV32-NEXT: lw a2, 8(sp)
; RV32-NEXT: lw a2, 24(sp)
; RV32-NEXT: sb a2, 8(a0)
; RV32-NEXT: lw a3, 4(sp)
; RV32-NEXT: lw a3, 20(sp)
; RV32-NEXT: sb a3, 4(a0)
; RV32-NEXT: lw a4, 0(sp)
; RV32-NEXT: lw a4, 16(sp)
; RV32-NEXT: sb a4, 0(a0)
; RV32-NEXT: srli a5, a1, 24
; RV32-NEXT: sb a5, 15(a0)
@ -219,18 +224,19 @@ define void @store_v4i32_align1(<4 x i32> %x, <4 x i32>* %ptr) {
; RV32-NEXT: sb a1, 2(a0)
; RV32-NEXT: srli a1, a4, 8
; RV32-NEXT: sb a1, 1(a0)
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: ret
;
; RV64-LABEL: store_v4i32_align1:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: addi sp, sp, -32
; RV64-NEXT: .cfi_def_cfa_offset 32
; RV64-NEXT: vsetivli zero, 4, e32,m1,ta,mu
; RV64-NEXT: vse32.v v8, (sp)
; RV64-NEXT: ld a1, 8(sp)
; RV64-NEXT: addi a1, sp, 16
; RV64-NEXT: vse32.v v8, (a1)
; RV64-NEXT: ld a1, 24(sp)
; RV64-NEXT: sb a1, 8(a0)
; RV64-NEXT: ld a2, 0(sp)
; RV64-NEXT: ld a2, 16(sp)
; RV64-NEXT: sb a2, 0(a0)
; RV64-NEXT: srli a3, a1, 56
; RV64-NEXT: sb a3, 15(a0)
@ -260,7 +266,7 @@ define void @store_v4i32_align1(<4 x i32> %x, <4 x i32>* %ptr) {
; RV64-NEXT: sb a1, 2(a0)
; RV64-NEXT: srli a1, a2, 8
; RV64-NEXT: sb a1, 1(a0)
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: addi sp, sp, 32
; RV64-NEXT: ret
store <4 x i32> %x, <4 x i32>* %ptr, align 1
ret void
@ -269,17 +275,18 @@ define void @store_v4i32_align1(<4 x i32> %x, <4 x i32>* %ptr) {
define void @store_v4i32_align2(<4 x i32> %x, <4 x i32>* %ptr) {
; RV32-LABEL: store_v4i32_align2:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: addi sp, sp, -32
; RV32-NEXT: .cfi_def_cfa_offset 32
; RV32-NEXT: vsetivli zero, 4, e32,m1,ta,mu
; RV32-NEXT: vse32.v v8, (sp)
; RV32-NEXT: lw a1, 12(sp)
; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vse32.v v8, (a1)
; RV32-NEXT: lw a1, 28(sp)
; RV32-NEXT: sh a1, 12(a0)
; RV32-NEXT: lw a2, 8(sp)
; RV32-NEXT: lw a2, 24(sp)
; RV32-NEXT: sh a2, 8(a0)
; RV32-NEXT: lw a3, 4(sp)
; RV32-NEXT: lw a3, 20(sp)
; RV32-NEXT: sh a3, 4(a0)
; RV32-NEXT: lw a4, 0(sp)
; RV32-NEXT: lw a4, 16(sp)
; RV32-NEXT: sh a4, 0(a0)
; RV32-NEXT: srli a1, a1, 16
; RV32-NEXT: sh a1, 14(a0)
@ -289,18 +296,19 @@ define void @store_v4i32_align2(<4 x i32> %x, <4 x i32>* %ptr) {
; RV32-NEXT: sh a1, 6(a0)
; RV32-NEXT: srli a1, a4, 16
; RV32-NEXT: sh a1, 2(a0)
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: ret
;
; RV64-LABEL: store_v4i32_align2:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: addi sp, sp, -32
; RV64-NEXT: .cfi_def_cfa_offset 32
; RV64-NEXT: vsetivli zero, 4, e32,m1,ta,mu
; RV64-NEXT: vse32.v v8, (sp)
; RV64-NEXT: ld a1, 8(sp)
; RV64-NEXT: addi a1, sp, 16
; RV64-NEXT: vse32.v v8, (a1)
; RV64-NEXT: ld a1, 24(sp)
; RV64-NEXT: sh a1, 8(a0)
; RV64-NEXT: ld a2, 0(sp)
; RV64-NEXT: ld a2, 16(sp)
; RV64-NEXT: sh a2, 0(a0)
; RV64-NEXT: srli a3, a1, 48
; RV64-NEXT: sh a3, 14(a0)
@ -314,7 +322,7 @@ define void @store_v4i32_align2(<4 x i32> %x, <4 x i32>* %ptr) {
; RV64-NEXT: sh a1, 4(a0)
; RV64-NEXT: srli a1, a2, 16
; RV64-NEXT: sh a1, 2(a0)
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: addi sp, sp, 32
; RV64-NEXT: ret
store <4 x i32> %x, <4 x i32>* %ptr, align 2
ret void
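
Taken together, the churn in these unaligned load/store tests is one pattern repeated per function. The following is a distilled before/after, lifted from the hunks above (register numbers and element widths vary between RV32 and RV64 and between tests); it is a summary of the generated code shown in the diff, not an additional test:

    # before: the on-stack temporary started at sp+0, so sp itself was the base
    addi     sp, sp, -16
    vsetivli zero, 4, e32,m1,ta,mu
    vle32.v  v8, (sp)

    # after: the bottom 16 bytes of the enlarged frame are left for the newly
    # reserved emergency spill slot, so the temporary moves up to sp+16; since
    # RVV unit-stride loads/stores carry no immediate offset, that address is
    # first materialized into a scratch GPR
    addi     sp, sp, -32
    vsetivli zero, 4, e32,m1,ta,mu
    addi     a0, sp, 16
    vle32.v  v8, (a0)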

View File

@ -25,8 +25,7 @@ body: |
; CHECK-LABEL: name: foo
; CHECK: bb.0:
; CHECK: successors: %bb.1(0x80000000)
; CHECK: $x2 = frame-setup ADDI $x2, -16
; CHECK: CFI_INSTRUCTION def_cfa_offset 16
; CHECK: CFI_INSTRUCTION def_cfa_offset 0
; CHECK: $x10 = PseudoReadVLENB
; CHECK: $x10 = SLLI killed $x10, 1
; CHECK: $x2 = SUB $x2, killed $x10
@ -34,7 +33,6 @@ body: |
; CHECK: $x10 = PseudoReadVLENB
; CHECK: $x10 = SLLI killed $x10, 1
; CHECK: $x2 = ADD $x2, killed $x10
; CHECK: $x2 = frame-destroy ADDI $x2, 16
; CHECK: PseudoRET
bb.0:
bb.1:

View File

@ -290,18 +290,18 @@ define void @local_var_m2_with_bp(i64 %n) {
define i64 @fixed_object(i64 %0, i64 %1, i64 %2, i64 %3, i64 %4, i64 %5, i64 %6, i64 %7, i64 %8) nounwind {
; RV64IV-LABEL: fixed_object:
; RV64IV: # %bb.0:
; RV64IV-NEXT: addi sp, sp, -32
; RV64IV-NEXT: addi sp, sp, -16
; RV64IV-NEXT: csrr a0, vlenb
; RV64IV-NEXT: slli a0, a0, 3
; RV64IV-NEXT: sub sp, sp, a0
; RV64IV-NEXT: csrr a0, vlenb
; RV64IV-NEXT: slli a0, a0, 3
; RV64IV-NEXT: add a0, sp, a0
; RV64IV-NEXT: ld a0, 32(a0)
; RV64IV-NEXT: ld a0, 16(a0)
; RV64IV-NEXT: csrr a1, vlenb
; RV64IV-NEXT: slli a1, a1, 3
; RV64IV-NEXT: add sp, sp, a1
; RV64IV-NEXT: addi sp, sp, 32
; RV64IV-NEXT: addi sp, sp, 16
; RV64IV-NEXT: ret
%fixed_size = alloca i32
%rvv_vector = alloca <vscale x 8 x i64>, align 8
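
The fixed_object change goes the other way: the function has a scalable-vector stack object but contains no RVV spill instructions, so under the new rule the emergency slot is no longer reserved and the scalar part of the frame shrinks from 32 to 16 bytes. The ninth i64 argument (the first one passed on the stack) is still addressed above the RVV region, just 16 bytes closer to sp. A worked reading of the updated output above:

    # the incoming argument sits above both the RVV area (8 * vlenb bytes) and
    # the 16-byte scalar frame, so its address is sp + 8*vlenb + 16
    # (previously sp + 8*vlenb + 32)
    csrr a0, vlenb
    slli a0, a0, 3
    add  a0, sp, a0
    ld   a0, 16(a0)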

View File

@ -663,15 +663,15 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
;
; RV32MV-LABEL: test_srem_vec:
; RV32MV: # %bb.0:
; RV32MV-NEXT: addi sp, sp, -64
; RV32MV-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
; RV32MV-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
; RV32MV-NEXT: sw s1, 52(sp) # 4-byte Folded Spill
; RV32MV-NEXT: sw s2, 48(sp) # 4-byte Folded Spill
; RV32MV-NEXT: sw s3, 44(sp) # 4-byte Folded Spill
; RV32MV-NEXT: sw s4, 40(sp) # 4-byte Folded Spill
; RV32MV-NEXT: sw s5, 36(sp) # 4-byte Folded Spill
; RV32MV-NEXT: addi s0, sp, 64
; RV32MV-NEXT: addi sp, sp, -96
; RV32MV-NEXT: sw ra, 92(sp) # 4-byte Folded Spill
; RV32MV-NEXT: sw s0, 88(sp) # 4-byte Folded Spill
; RV32MV-NEXT: sw s1, 84(sp) # 4-byte Folded Spill
; RV32MV-NEXT: sw s2, 80(sp) # 4-byte Folded Spill
; RV32MV-NEXT: sw s3, 76(sp) # 4-byte Folded Spill
; RV32MV-NEXT: sw s4, 72(sp) # 4-byte Folded Spill
; RV32MV-NEXT: sw s5, 68(sp) # 4-byte Folded Spill
; RV32MV-NEXT: addi s0, sp, 96
; RV32MV-NEXT: andi sp, sp, -32
; RV32MV-NEXT: mv s1, a0
; RV32MV-NEXT: lw a0, 8(a0)
@ -695,28 +695,29 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
; RV32MV-NEXT: addi a2, zero, 6
; RV32MV-NEXT: mv a3, zero
; RV32MV-NEXT: call __moddi3@plt
; RV32MV-NEXT: sw a1, 4(sp)
; RV32MV-NEXT: sw a0, 0(sp)
; RV32MV-NEXT: sw a1, 36(sp)
; RV32MV-NEXT: sw a0, 32(sp)
; RV32MV-NEXT: addi a2, zero, -5
; RV32MV-NEXT: addi a3, zero, -1
; RV32MV-NEXT: mv a0, s4
; RV32MV-NEXT: mv a1, s5
; RV32MV-NEXT: call __moddi3@plt
; RV32MV-NEXT: sw a1, 20(sp)
; RV32MV-NEXT: sw a0, 16(sp)
; RV32MV-NEXT: sw a1, 52(sp)
; RV32MV-NEXT: sw a0, 48(sp)
; RV32MV-NEXT: addi a2, zero, 7
; RV32MV-NEXT: mv a0, s2
; RV32MV-NEXT: mv a1, s3
; RV32MV-NEXT: mv a3, zero
; RV32MV-NEXT: call __moddi3@plt
; RV32MV-NEXT: sw a1, 12(sp)
; RV32MV-NEXT: sw a0, 8(sp)
; RV32MV-NEXT: sw a1, 44(sp)
; RV32MV-NEXT: sw a0, 40(sp)
; RV32MV-NEXT: addi a0, zero, 85
; RV32MV-NEXT: vsetivli zero, 1, e8,mf8,ta,mu
; RV32MV-NEXT: vmv.s.x v0, a0
; RV32MV-NEXT: vsetivli zero, 8, e32,m2,ta,mu
; RV32MV-NEXT: vmv.v.i v26, 1
; RV32MV-NEXT: vle32.v v28, (sp)
; RV32MV-NEXT: addi a0, sp, 32
; RV32MV-NEXT: vle32.v v28, (a0)
; RV32MV-NEXT: lui a0, %hi(.LCPI3_0)
; RV32MV-NEXT: addi a0, a0, %lo(.LCPI3_0)
; RV32MV-NEXT: vle32.v v30, (a0)
@ -755,23 +756,23 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
; RV32MV-NEXT: slli a0, a0, 2
; RV32MV-NEXT: or a0, a1, a0
; RV32MV-NEXT: sw a0, 8(s1)
; RV32MV-NEXT: addi sp, s0, -64
; RV32MV-NEXT: lw s5, 36(sp) # 4-byte Folded Reload
; RV32MV-NEXT: lw s4, 40(sp) # 4-byte Folded Reload
; RV32MV-NEXT: lw s3, 44(sp) # 4-byte Folded Reload
; RV32MV-NEXT: lw s2, 48(sp) # 4-byte Folded Reload
; RV32MV-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
; RV32MV-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
; RV32MV-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
; RV32MV-NEXT: addi sp, sp, 64
; RV32MV-NEXT: addi sp, s0, -96
; RV32MV-NEXT: lw s5, 68(sp) # 4-byte Folded Reload
; RV32MV-NEXT: lw s4, 72(sp) # 4-byte Folded Reload
; RV32MV-NEXT: lw s3, 76(sp) # 4-byte Folded Reload
; RV32MV-NEXT: lw s2, 80(sp) # 4-byte Folded Reload
; RV32MV-NEXT: lw s1, 84(sp) # 4-byte Folded Reload
; RV32MV-NEXT: lw s0, 88(sp) # 4-byte Folded Reload
; RV32MV-NEXT: lw ra, 92(sp) # 4-byte Folded Reload
; RV32MV-NEXT: addi sp, sp, 96
; RV32MV-NEXT: ret
;
; RV64MV-LABEL: test_srem_vec:
; RV64MV: # %bb.0:
; RV64MV-NEXT: addi sp, sp, -64
; RV64MV-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
; RV64MV-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
; RV64MV-NEXT: addi s0, sp, 64
; RV64MV-NEXT: addi sp, sp, -96
; RV64MV-NEXT: sd ra, 88(sp) # 8-byte Folded Spill
; RV64MV-NEXT: sd s0, 80(sp) # 8-byte Folded Spill
; RV64MV-NEXT: addi s0, sp, 96
; RV64MV-NEXT: andi sp, sp, -32
; RV64MV-NEXT: lb a1, 12(a0)
; RV64MV-NEXT: lwu a2, 8(a0)
@ -804,7 +805,7 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
; RV64MV-NEXT: addi a5, zero, 6
; RV64MV-NEXT: mul a1, a1, a5
; RV64MV-NEXT: sub a1, a3, a1
; RV64MV-NEXT: sd a1, 0(sp)
; RV64MV-NEXT: sd a1, 32(sp)
; RV64MV-NEXT: lui a1, 1035469
; RV64MV-NEXT: addiw a1, a1, -819
; RV64MV-NEXT: slli a1, a1, 12
@ -820,7 +821,7 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
; RV64MV-NEXT: slli a3, a1, 2
; RV64MV-NEXT: add a1, a3, a1
; RV64MV-NEXT: add a1, a2, a1
; RV64MV-NEXT: sd a1, 16(sp)
; RV64MV-NEXT: sd a1, 48(sp)
; RV64MV-NEXT: lui a1, 18725
; RV64MV-NEXT: addiw a1, a1, -1755
; RV64MV-NEXT: slli a1, a1, 12
@ -836,9 +837,10 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
; RV64MV-NEXT: slli a2, a1, 3
; RV64MV-NEXT: sub a1, a1, a2
; RV64MV-NEXT: add a1, a4, a1
; RV64MV-NEXT: sd a1, 8(sp)
; RV64MV-NEXT: sd a1, 40(sp)
; RV64MV-NEXT: vsetivli zero, 4, e64,m2,ta,mu
; RV64MV-NEXT: vle64.v v26, (sp)
; RV64MV-NEXT: addi a1, sp, 32
; RV64MV-NEXT: vle64.v v26, (a1)
; RV64MV-NEXT: lui a1, %hi(.LCPI3_0)
; RV64MV-NEXT: addi a1, a1, %lo(.LCPI3_0)
; RV64MV-NEXT: vle64.v v28, (a1)
@ -865,10 +867,10 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
; RV64MV-NEXT: slli a2, a3, 33
; RV64MV-NEXT: or a1, a1, a2
; RV64MV-NEXT: sd a1, 0(a0)
; RV64MV-NEXT: addi sp, s0, -64
; RV64MV-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; RV64MV-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; RV64MV-NEXT: addi sp, sp, 64
; RV64MV-NEXT: addi sp, s0, -96
; RV64MV-NEXT: ld s0, 80(sp) # 8-byte Folded Reload
; RV64MV-NEXT: ld ra, 88(sp) # 8-byte Folded Reload
; RV64MV-NEXT: addi sp, sp, 96
; RV64MV-NEXT: ret
%ld = load <3 x i33>, <3 x i33>* %X
%srem = srem <3 x i33> %ld, <i33 6, i33 7, i33 -5>