AMDGPU: Use gfx9 carry-less add/sub instructions

llvm-svn: 319491
Matt Arsenault 2017-11-30 22:51:26 +00:00
parent ba4014e9dc
commit 84445dd13c
29 changed files with 630 additions and 291 deletions
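The recurring pattern in the changes below is a subtarget check that prefers the gfx9 carry-less VALU add/sub opcodes when they are available. A minimal sketch of that selection follows; it assumes the usual AMDGPU backend headers are in scope, and pickAdd32Opcode and the exact subtarget class name are illustrative stand-ins, while the opcode names and the hasAddNoCarry() query are the ones used in the diff itself:

// Sketch only: choose a 32-bit VALU add opcode for the current subtarget.
// On gfx9 (hasAddNoCarry()), v_add_u32 produces no carry-out, so no implicit
// VCC def has to be threaded through; older targets only have v_add_i32,
// which always writes VCC.
//
//   pre-gfx9: v_add_i32_e32 v0, vcc, v1, v0   ; carry-out clobbers VCC
//   gfx9:     v_add_u32_e32 v0, v0, v1        ; carry-less
static unsigned pickAdd32Opcode(const SISubtarget &ST) {
  return ST.hasAddNoCarry() ? AMDGPU::V_ADD_U32_e32
                            : AMDGPU::V_ADD_I32_e32;
}

Where a VOP3 encoding is needed (the DAG selector and the moveToVALU path below), the same choice is made between the V_ADD_U32_e64/V_SUB_U32_e64 and V_ADD_I32/V_SUB_I32 forms.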

View File

@ -456,7 +456,8 @@ void AMDGPUDAGToDAGISel::Select(SDNode *N) {
N = glueCopyToM0(N);
switch (Opc) {
default: break;
default:
break;
// We are selecting i64 ADD here instead of custom lower it during
// DAG legalization, so we can fold some i64 ADDs used for address
// calculation into the LOAD and STORE instructions.
@ -703,6 +704,7 @@ bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
return true;
}
// FIXME: Should only handle addcarry/subcarry
void AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
SDLoc DL(N);
SDValue LHS = N->getOperand(0);
@ -712,8 +714,7 @@ void AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
bool ConsumeCarry = (Opcode == ISD::ADDE || Opcode == ISD::SUBE);
bool ProduceCarry =
ConsumeCarry || Opcode == ISD::ADDC || Opcode == ISD::SUBC;
bool IsAdd =
(Opcode == ISD::ADD || Opcode == ISD::ADDC || Opcode == ISD::ADDE);
bool IsAdd = Opcode == ISD::ADD || Opcode == ISD::ADDC || Opcode == ISD::ADDE;
SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);
@ -876,8 +877,12 @@ bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
Zero, Addr.getOperand(1));
if (isDSOffsetLegal(Sub, ByteOffset, 16)) {
// FIXME: Select to VOP3 version for with-carry.
unsigned SubOp = Subtarget->hasAddNoCarry() ?
AMDGPU::V_SUB_U32_e64 : AMDGPU::V_SUB_I32_e32;
MachineSDNode *MachineSub
= CurDAG->getMachineNode(AMDGPU::V_SUB_I32_e32, DL, MVT::i32,
= CurDAG->getMachineNode(SubOp, DL, MVT::i32,
Zero, Addr.getOperand(1));
Base = SDValue(MachineSub, 0);
@ -946,8 +951,11 @@ bool AMDGPUDAGToDAGISel::SelectDS64Bit4ByteAligned(SDValue Addr, SDValue &Base,
Zero, Addr.getOperand(1));
if (isDSOffsetLegal(Sub, DWordOffset1, 8)) {
unsigned SubOp = Subtarget->hasAddNoCarry() ?
AMDGPU::V_SUB_U32_e64 : AMDGPU::V_SUB_I32_e32;
MachineSDNode *MachineSub
= CurDAG->getMachineNode(AMDGPU::V_SUB_I32_e32, DL, MVT::i32,
= CurDAG->getMachineNode(SubOp, DL, MVT::i32,
Zero, Addr.getOperand(1));
Base = SDValue(MachineSub, 0);

View File

@ -226,6 +226,14 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
setOperationAction(ISD::ADDCARRY, MVT::i32, Legal);
setOperationAction(ISD::SUBCARRY, MVT::i32, Legal);
#if 0
setOperationAction(ISD::ADDCARRY, MVT::i64, Legal);
setOperationAction(ISD::SUBCARRY, MVT::i64, Legal);
#endif
//setOperationAction(ISD::ADDC, MVT::i64, Expand);
//setOperationAction(ISD::SUBC, MVT::i64, Expand);
// We only support LOAD/STORE and vector manipulation ops for vectors
// with > 4 elements.
for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,

View File

@ -1057,9 +1057,9 @@ unsigned SIInstrInfo::calculateLDSSpillAddress(
.addReg(TIDIGYReg)
.addReg(TIDReg);
// (NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)) + TIDIG.Z
BuildMI(Entry, Insert, DL, get(AMDGPU::V_ADD_I32_e32), TIDReg)
.addReg(TIDReg)
.addReg(TIDIGZReg);
getAddNoCarry(Entry, Insert, DL, TIDReg)
.addReg(TIDReg)
.addReg(TIDIGZReg);
} else {
// Get the wave id
BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_LO_U32_B32_e64),
@ -1082,9 +1082,9 @@ unsigned SIInstrInfo::calculateLDSSpillAddress(
// Add FrameIndex to LDS offset
unsigned LDSOffset = MFI->getLDSSize() + (FrameOffset * WorkGroupSize);
BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), TmpReg)
.addImm(LDSOffset)
.addReg(TIDReg);
getAddNoCarry(MBB, MI, DL, TmpReg)
.addImm(LDSOffset)
.addReg(TIDReg);
return TmpReg;
}
@ -2831,7 +2831,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
return true;
}
unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) {
unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) const {
switch (MI.getOpcode()) {
default: return AMDGPU::INSTRUCTION_LIST_END;
case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
@ -2844,10 +2844,17 @@ unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) {
return MI.getOperand(1).isReg() ?
AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
case AMDGPU::S_ADD_I32:
case AMDGPU::S_ADD_U32: return AMDGPU::V_ADD_I32_e32;
case AMDGPU::S_ADDC_U32: return AMDGPU::V_ADDC_U32_e32;
return ST.hasAddNoCarry() ? AMDGPU::V_ADD_U32_e64 : AMDGPU::V_ADD_I32_e32;
case AMDGPU::S_ADDC_U32:
return AMDGPU::V_ADDC_U32_e32;
case AMDGPU::S_SUB_I32:
case AMDGPU::S_SUB_U32: return AMDGPU::V_SUB_I32_e32;
return ST.hasAddNoCarry() ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_SUB_I32_e32;
// FIXME: These are not consistently handled, and are selected when the carry
// is used.
case AMDGPU::S_ADD_U32:
return AMDGPU::V_ADD_I32_e32;
case AMDGPU::S_SUB_U32:
return AMDGPU::V_SUB_I32_e32;
case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_I32;
case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64;
@ -2894,10 +2901,6 @@ unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) {
}
}
bool SIInstrInfo::isSALUOpSupportedOnVALU(const MachineInstr &MI) const {
return getVALUOp(MI) != AMDGPU::INSTRUCTION_LIST_END;
}
const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
unsigned OpNo) const {
const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
@ -3612,6 +3615,14 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
splitScalar64BitAddSub(Worklist, Inst);
Inst.eraseFromParent();
continue;
case AMDGPU::S_ADD_I32:
case AMDGPU::S_SUB_I32:
// FIXME: The u32 versions currently selected use the carry.
if (moveScalarAddSub(Worklist, Inst))
continue;
// Default handling
break;
case AMDGPU::S_AND_B64:
splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_AND_B32_e64);
Inst.eraseFromParent();
@ -3720,6 +3731,14 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
auto Add = MRI.getUniqueVRegDef(VAddr->getReg());
unsigned Offset = 0;
// FIXME: This isn't safe because the addressing mode doesn't work
// correctly if vaddr is negative.
//
// FIXME: Handle v_add_u32 and VOP3 form. Also don't rely on immediate
// being in src0.
//
// FIXME: Should probably be done somewhere else, maybe SIFoldOperands.
//
// See if we can extract an immediate offset by recognizing one of these:
// V_ADD_I32_e32 dst, imm, src1
// V_ADD_I32_e32 dst, (S_MOV_B32 imm), src1
@ -3728,7 +3747,7 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
const MachineOperand *Src =
getNamedOperand(*Add, AMDGPU::OpName::src0);
if (Src && Src->isReg()) {
if (Src->isReg()) {
auto Mov = MRI.getUniqueVRegDef(Src->getReg());
if (Mov && Mov->getOpcode() == AMDGPU::S_MOV_B32)
Src = &Mov->getOperand(1);
@ -3858,6 +3877,41 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
}
}
// Add/sub require special handling to deal with carry outs.
bool SIInstrInfo::moveScalarAddSub(SetVectorType &Worklist,
MachineInstr &Inst) const {
if (ST.hasAddNoCarry()) {
// Assume there is no user of scc since we don't select this in that case.
// Since scc isn't used, it doesn't really matter if the i32 or u32 variant
// is used.
MachineBasicBlock &MBB = *Inst.getParent();
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
unsigned OldDstReg = Inst.getOperand(0).getReg();
unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
unsigned Opc = Inst.getOpcode();
assert(Opc == AMDGPU::S_ADD_I32 || Opc == AMDGPU::S_SUB_I32);
unsigned NewOpc = Opc == AMDGPU::S_ADD_I32 ?
AMDGPU::V_ADD_U32_e64 : AMDGPU::V_SUB_U32_e64;
assert(Inst.getOperand(3).getReg() == AMDGPU::SCC);
Inst.RemoveOperand(3);
Inst.setDesc(get(NewOpc));
Inst.addImplicitDefUseOperands(*MBB.getParent());
MRI.replaceRegWith(OldDstReg, ResultReg);
legalizeOperands(Inst);
addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
return true;
}
return false;
}
void SIInstrInfo::lowerScalarAbs(SetVectorType &Worklist,
MachineInstr &Inst) const {
MachineBasicBlock &MBB = *Inst.getParent();
@ -3870,7 +3924,10 @@ void SIInstrInfo::lowerScalarAbs(SetVectorType &Worklist,
unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
BuildMI(MBB, MII, DL, get(AMDGPU::V_SUB_I32_e32), TmpReg)
unsigned SubOp = ST.hasAddNoCarry() ?
AMDGPU::V_SUB_U32_e32 : AMDGPU::V_SUB_I32_e32;
BuildMI(MBB, MII, DL, get(SubOp), TmpReg)
.addImm(0)
.addReg(Src.getReg());

View File

@ -76,6 +76,9 @@ public:
private:
void swapOperands(MachineInstr &Inst) const;
bool moveScalarAddSub(SetVectorType &Worklist,
MachineInstr &Inst) const;
void lowerScalarAbs(SetVectorType &Worklist,
MachineInstr &Inst) const;
@ -691,9 +694,7 @@ public:
bool verifyInstruction(const MachineInstr &MI,
StringRef &ErrInfo) const override;
static unsigned getVALUOp(const MachineInstr &MI);
bool isSALUOpSupportedOnVALU(const MachineInstr &MI) const;
unsigned getVALUOp(const MachineInstr &MI) const;
/// \brief Return the correct register class for \p OpNo. For target-specific
/// instructions, this will return the register class that has been defined

View File

@ -159,10 +159,14 @@ def S_SUB_U64_PSEUDO : SPseudoInstSI <
[(set SReg_64:$vdst, (sub i64:$src0, i64:$src1))]
>;
def S_ADDC_U64_PSEUDO : SPseudoInstSI <(outs SReg_64:$vdst, SReg_64:$sdst),
(ins SSrc_b64:$src0, SSrc_b64:$src1)>;
def S_SUBC_U64_PSEUDO : SPseudoInstSI <(outs SReg_64:$vdst, SReg_64:$sdst),
(ins SSrc_b64:$src0, SSrc_b64:$src1)>;
def S_ADD_U64_CO_PSEUDO : SPseudoInstSI <
(outs SReg_64:$vdst, VOPDstS64:$sdst), (ins SSrc_b64:$src0, SSrc_b64:$src1)
>;
def S_SUB_U64_CO_PSEUDO : SPseudoInstSI <
(outs SReg_64:$vdst, VOPDstS64:$sdst), (ins SSrc_b64:$src0, SSrc_b64:$src1)
>;
} // End usesCustomInserter = 1, Defs = [SCC]
let usesCustomInserter = 1, SALU = 1 in {

View File

@ -498,9 +498,12 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
if (CI.BaseOff) {
BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
BaseRegFlags = RegState::Kill;
BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::V_ADD_I32_e32), BaseReg)
.addImm(CI.BaseOff)
.addReg(AddrReg->getReg());
unsigned AddOpc = STM->hasAddNoCarry() ?
AMDGPU::V_ADD_U32_e32 : AMDGPU::V_ADD_I32_e32;
BuildMI(*MBB, CI.Paired, DL, TII->get(AddOpc), BaseReg)
.addImm(CI.BaseOff)
.addReg(AddrReg->getReg());
}
MachineInstrBuilder Read2 =
@ -581,9 +584,12 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
if (CI.BaseOff) {
BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
BaseRegFlags = RegState::Kill;
BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::V_ADD_I32_e32), BaseReg)
.addImm(CI.BaseOff)
.addReg(Addr->getReg());
unsigned AddOpc = STM->hasAddNoCarry() ?
AMDGPU::V_ADD_U32_e32 : AMDGPU::V_ADD_I32_e32;
BuildMI(*MBB, CI.Paired, DL, TII->get(AddOpc), BaseReg)
.addImm(CI.BaseOff)
.addReg(Addr->getReg());
}
MachineInstrBuilder Write2 =

View File

@ -1,14 +1,15 @@
; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SIVI,FUNC %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SIVI,FUNC %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9,FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
;FUNC-LABEL: {{^}}test1:
;EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; FUNC-LABEL: {{^}}s_add_i32:
; EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;SI: s_add_i32 s[[REG:[0-9]+]], {{s[0-9]+, s[0-9]+}}
;SI: v_mov_b32_e32 v[[REG]], s[[REG]]
;SI: buffer_store_dword v[[REG]],
define amdgpu_kernel void @test1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
; GCN: s_add_i32 s[[REG:[0-9]+]], {{s[0-9]+, s[0-9]+}}
; GCN: v_mov_b32_e32 v[[REG]], s[[REG]]
; GCN: buffer_store_dword v[[REG]],
define amdgpu_kernel void @s_add_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load i32, i32 addrspace(1)* %in
%b = load i32, i32 addrspace(1)* %b_ptr
@ -17,14 +18,13 @@ define amdgpu_kernel void @test1(i32 addrspace(1)* %out, i32 addrspace(1)* %in)
ret void
}
;FUNC-LABEL: {{^}}test2:
;EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; FUNC-LABEL: {{^}}s_add_v2i32:
; EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;SI: s_add_i32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
;SI: s_add_i32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
define amdgpu_kernel void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
; GCN: s_add_i32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
; GCN: s_add_i32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
define amdgpu_kernel void @s_add_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
%a = load <2 x i32>, <2 x i32> addrspace(1)* %in
%b = load <2 x i32>, <2 x i32> addrspace(1)* %b_ptr
@ -33,18 +33,17 @@ define amdgpu_kernel void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspa
ret void
}
;FUNC-LABEL: {{^}}test4:
;EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; FUNC-LABEL: {{^}}s_add_v4i32:
; EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: ADD_INT {{[* ]*}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;SI: s_add_i32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
;SI: s_add_i32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
;SI: s_add_i32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
;SI: s_add_i32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
define amdgpu_kernel void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
; GCN: s_add_i32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
; GCN: s_add_i32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
; GCN: s_add_i32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
; GCN: s_add_i32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
define amdgpu_kernel void @s_add_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%a = load <4 x i32>, <4 x i32> addrspace(1)* %in
%b = load <4 x i32>, <4 x i32> addrspace(1)* %b_ptr
@ -53,7 +52,7 @@ define amdgpu_kernel void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspa
ret void
}
; FUNC-LABEL: {{^}}test8:
; FUNC-LABEL: {{^}}s_add_v8i32:
; EG: ADD_INT
; EG: ADD_INT
; EG: ADD_INT
@ -63,22 +62,22 @@ define amdgpu_kernel void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspa
; EG: ADD_INT
; EG: ADD_INT
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
define amdgpu_kernel void @test8(<8 x i32> addrspace(1)* %out, <8 x i32> %a, <8 x i32> %b) {
; GCN: s_add_i32
; GCN: s_add_i32
; GCN: s_add_i32
; GCN: s_add_i32
; GCN: s_add_i32
; GCN: s_add_i32
; GCN: s_add_i32
; GCN: s_add_i32
define amdgpu_kernel void @s_add_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> %a, <8 x i32> %b) {
entry:
%0 = add <8 x i32> %a, %b
store <8 x i32> %0, <8 x i32> addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}test16:
; FUNC-LABEL: {{^}}s_add_v16i32:
; EG: ADD_INT
; EG: ADD_INT
; EG: ADD_INT
@ -96,32 +95,62 @@ entry:
; EG: ADD_INT
; EG: ADD_INT
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
; SI: s_add_i32
define amdgpu_kernel void @test16(<16 x i32> addrspace(1)* %out, <16 x i32> %a, <16 x i32> %b) {
; GCN: s_add_i32
; GCN: s_add_i32
; GCN: s_add_i32
; GCN: s_add_i32
; GCN: s_add_i32
; GCN: s_add_i32
; GCN: s_add_i32
; GCN: s_add_i32
; GCN: s_add_i32
; GCN: s_add_i32
; GCN: s_add_i32
; GCN: s_add_i32
; GCN: s_add_i32
; GCN: s_add_i32
; GCN: s_add_i32
; GCN: s_add_i32
define amdgpu_kernel void @s_add_v16i32(<16 x i32> addrspace(1)* %out, <16 x i32> %a, <16 x i32> %b) {
entry:
%0 = add <16 x i32> %a, %b
store <16 x i32> %0, <16 x i32> addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}v_add_i32:
; GCN: {{buffer|flat|global}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat|global}}_load_dword [[B:v[0-9]+]]
; SIVI: v_add_{{i|u}}32_e32 v{{[0-9]+}}, vcc, [[B]], [[A]]
; GFX9: v_add_u32_e32 v{{[0-9]+}}, [[A]], [[B]]
define amdgpu_kernel void @v_add_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x()
%gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 %tid
%b_ptr = getelementptr i32, i32 addrspace(1)* %gep, i32 1
%a = load volatile i32, i32 addrspace(1)* %gep
%b = load volatile i32, i32 addrspace(1)* %b_ptr
%result = add i32 %a, %b
store i32 %result, i32 addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}v_add_imm_i32:
; GCN: {{buffer|flat|global}}_load_dword [[A:v[0-9]+]]
; SIVI: v_add_{{i|u}}32_e32 v{{[0-9]+}}, vcc, 0x7b, [[A]]
; GFX9: v_add_u32_e32 v{{[0-9]+}}, 0x7b, [[A]]
define amdgpu_kernel void @v_add_imm_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x()
%gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 %tid
%b_ptr = getelementptr i32, i32 addrspace(1)* %gep, i32 1
%a = load volatile i32, i32 addrspace(1)* %gep
%result = add i32 %a, 123
store i32 %result, i32 addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}add64:
; SI: s_add_u32
; SI: s_addc_u32
; GCN: s_add_u32
; GCN: s_addc_u32
; EG: MEM_RAT_CACHELESS STORE_RAW [[LO:T[0-9]+\.XY]]
; EG-DAG: ADD_INT {{[* ]*}}
@ -131,8 +160,8 @@ entry:
; EG-NOT: SUB
define amdgpu_kernel void @add64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
entry:
%0 = add i64 %a, %b
store i64 %0, i64 addrspace(1)* %out
%add = add i64 %a, %b
store i64 %add, i64 addrspace(1)* %out
ret void
}
@ -142,7 +171,7 @@ entry:
; to a VGPR before doing the add.
; FUNC-LABEL: {{^}}add64_sgpr_vgpr:
; SI-NOT: v_addc_u32_e32 s
; GCN-NOT: v_addc_u32_e32 s
; EG: MEM_RAT_CACHELESS STORE_RAW [[LO:T[0-9]+\.XY]]
; EG-DAG: ADD_INT {{[* ]*}}
@ -160,8 +189,8 @@ entry:
; Test i64 add inside a branch.
; FUNC-LABEL: {{^}}add64_in_branch:
; SI: s_add_u32
; SI: s_addc_u32
; GCN: s_add_u32
; GCN: s_addc_u32
; EG: MEM_RAT_CACHELESS STORE_RAW [[LO:T[0-9]+\.XY]]
; EG-DAG: ADD_INT {{[* ]*}}
@ -187,3 +216,8 @@ endif:
store i64 %3, i64 addrspace(1)* %out
ret void
}
declare i32 @llvm.r600.read.tidig.x() #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone speculatable }

View File

@ -1,15 +1,18 @@
; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX9 -check-prefix=GCN %s
; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI %s
; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9 %s
; GCN-LABEL: ds_read32_combine_stride_400:
; GCN: s_load_dword [[ARG:s[0-9]+]], s[4:5], 0x0
; GCN: v_mov_b32_e32 [[BASE:v[0-9]+]], [[ARG]]
; GCN-DAG: v_add{{(_co)?}}_u32_e32 [[B1:v[0-9]+]], vcc, 0x320, [[BASE]]
; GCN-DAG: v_add{{(_co)?}}_u32_e32 [[B2:v[0-9]+]], vcc, 0x640, [[BASE]]
; GCN-DAG: v_add{{(_co)?}}_u32_e32 [[B3:v[0-9]+]], vcc, 0x960, [[BASE]]
; GFX9-DAG: v_add{{(_co)?}}_u32_e32 [[B1:v[0-9]+]], vcc, 0x320, [[BASE]]
; GFX9-DAG: v_add{{(_co)?}}_u32_e32 [[B2:v[0-9]+]], vcc, 0x640, [[BASE]]
; GFX9-DAG: v_add{{(_co)?}}_u32_e32 [[B3:v[0-9]+]], vcc, 0x960, [[BASE]]
; VI-DAG: v_add_u32_e32 [[B1:v[0-9]+]], vcc, 0x320, [[BASE]]
; VI-DAG: v_add_u32_e32 [[B2:v[0-9]+]], vcc, 0x640, [[BASE]]
; VI-DAG: v_add_u32_e32 [[B3:v[0-9]+]], vcc, 0x960, [[BASE]]
; GFX9-DAG: v_add_u32_e32 [[B1:v[0-9]+]], 0x320, [[BASE]]
; GFX9-DAG: v_add_u32_e32 [[B2:v[0-9]+]], 0x640, [[BASE]]
; GFX9-DAG: v_add_u32_e32 [[B3:v[0-9]+]], 0x960, [[BASE]]
; GCN-DAG: ds_read2_b32 v[{{[0-9]+:[0-9]+}}], [[BASE]] offset1:100
; GCN-DAG: ds_read2_b32 v[{{[0-9]+:[0-9]+}}], [[B1]] offset1:100
; GCN-DAG: ds_read2_b32 v[{{[0-9]+:[0-9]+}}], [[B2]] offset1:100
@ -46,12 +49,15 @@ bb:
; GCN-LABEL: ds_read32_combine_stride_400_back:
; GCN: s_load_dword [[ARG:s[0-9]+]], s[4:5], 0x0
; GCN: v_mov_b32_e32 [[BASE:v[0-9]+]], [[ARG]]
; GCN-DAG: v_add{{(_co)?}}_u32_e32 [[B1:v[0-9]+]], vcc, 0x320, [[BASE]]
; GCN-DAG: v_add{{(_co)?}}_u32_e32 [[B2:v[0-9]+]], vcc, 0x640, [[BASE]]
; GCN-DAG: v_add{{(_co)?}}_u32_e32 [[B3:v[0-9]+]], vcc, 0x960, [[BASE]]
; GFX9-DAG: v_add{{(_co)?}}_u32_e32 [[B1:v[0-9]+]], vcc, 0x320, [[BASE]]
; GFX9-DAG: v_add{{(_co)?}}_u32_e32 [[B2:v[0-9]+]], vcc, 0x640, [[BASE]]
; GFX9-DAG: v_add{{(_co)?}}_u32_e32 [[B3:v[0-9]+]], vcc, 0x960, [[BASE]]
; VI-DAG: v_add_u32_e32 [[B1:v[0-9]+]], vcc, 0x320, [[BASE]]
; VI-DAG: v_add_u32_e32 [[B2:v[0-9]+]], vcc, 0x640, [[BASE]]
; VI-DAG: v_add_u32_e32 [[B3:v[0-9]+]], vcc, 0x960, [[BASE]]
; GFX9-DAG: v_add_u32_e32 [[B1:v[0-9]+]], 0x320, [[BASE]]
; GFX9-DAG: v_add_u32_e32 [[B2:v[0-9]+]], 0x640, [[BASE]]
; GFX9-DAG: v_add_u32_e32 [[B3:v[0-9]+]], 0x960, [[BASE]]
; GCN-DAG: ds_read2_b32 v[{{[0-9]+:[0-9]+}}], [[BASE]] offset1:100
; GCN-DAG: ds_read2_b32 v[{{[0-9]+:[0-9]+}}], [[B1]] offset1:100
; GCN-DAG: ds_read2_b32 v[{{[0-9]+:[0-9]+}}], [[B2]] offset1:100
@ -124,12 +130,15 @@ bb:
; GCN-LABEL: ds_read32_combine_stride_8192_shifted:
; GCN: s_load_dword [[ARG:s[0-9]+]], s[4:5], 0x0
; GCN: v_mov_b32_e32 [[BASE:v[0-9]+]], [[ARG]]
; GCN-DAG: v_add{{(_co)?}}_u32_e32 [[B1:v[0-9]+]], vcc, 8, [[BASE]]
; GCN-DAG: v_add{{(_co)?}}_u32_e32 [[B2:v[0-9]+]], vcc, 0x4008, [[BASE]]
; GCN-DAG: v_add{{(_co)?}}_u32_e32 [[B3:v[0-9]+]], vcc, 0x8008, [[BASE]]
; GFX9-DAG: v_add{{(_co)?}}_u32_e32 [[B1:v[0-9]+]], vcc, 8, [[BASE]]
; GFX9-DAG: v_add{{(_co)?}}_u32_e32 [[B2:v[0-9]+]], vcc, 0x4008, [[BASE]]
; GFX9-DAG: v_add{{(_co)?}}_u32_e32 [[B3:v[0-9]+]], vcc, 0x8008, [[BASE]]
; VI-DAG: v_add_u32_e32 [[B1:v[0-9]+]], vcc, 8, [[BASE]]
; VI-DAG: v_add_u32_e32 [[B2:v[0-9]+]], vcc, 0x4008, [[BASE]]
; VI-DAG: v_add_u32_e32 [[B3:v[0-9]+]], vcc, 0x8008, [[BASE]]
; GFX9-DAG: v_add_u32_e32 [[B1:v[0-9]+]], 8, [[BASE]]
; GFX9-DAG: v_add_u32_e32 [[B2:v[0-9]+]], 0x4008, [[BASE]]
; GFX9-DAG: v_add_u32_e32 [[B3:v[0-9]+]], 0x8008, [[BASE]]
; GCN-DAG: ds_read2st64_b32 v[{{[0-9]+:[0-9]+}}], [[B1]] offset1:32
; GCN-DAG: ds_read2st64_b32 v[{{[0-9]+:[0-9]+}}], [[B2]] offset1:32
; GCN-DAG: ds_read2st64_b32 v[{{[0-9]+:[0-9]+}}], [[B3]] offset1:32
@ -160,8 +169,10 @@ bb:
; GCN-LABEL: ds_read64_combine_stride_400:
; GCN: s_load_dword [[ARG:s[0-9]+]], s[4:5], 0x0
; GCN: v_mov_b32_e32 [[BASE:v[0-9]+]], [[ARG]]
; GCN-DAG: v_add{{(_co)?}}_u32_e32 [[B1:v[0-9]+]], vcc, 0x960, [[BASE]]
; GFX9-DAG: v_add{{(_co)?}}_u32_e32 [[B1:v[0-9]+]], vcc, 0x960, [[BASE]]
; VI-DAG: v_add_u32_e32 [[B1:v[0-9]+]], vcc, 0x960, [[BASE]]
; GFX9-DAG: v_add_u32_e32 [[B1:v[0-9]+]], 0x960, [[BASE]]
; GCN-DAG: ds_read2_b64 v[{{[0-9]+:[0-9]+}}], [[BASE]] offset1:50
; GCN-DAG: ds_read2_b64 v[{{[0-9]+:[0-9]+}}], [[BASE]] offset0:100 offset1:150
; GCN-DAG: ds_read2_b64 v[{{[0-9]+:[0-9]+}}], [[BASE]] offset0:200 offset1:250
@ -198,12 +209,15 @@ bb:
; GCN-LABEL: ds_read64_combine_stride_8192_shifted:
; GCN: s_load_dword [[ARG:s[0-9]+]], s[4:5], 0x0
; GCN: v_mov_b32_e32 [[BASE:v[0-9]+]], [[ARG]]
; GCN-DAG: v_add{{(_co)?}}_u32_e32 [[B1:v[0-9]+]], vcc, 8, [[BASE]]
; GCN-DAG: v_add{{(_co)?}}_u32_e32 [[B2:v[0-9]+]], vcc, 0x4008, [[BASE]]
; GCN-DAG: v_add{{(_co)?}}_u32_e32 [[B3:v[0-9]+]], vcc, 0x8008, [[BASE]]
; GFX9-DAG: v_add{{(_co)?}}_u32_e32 [[B1:v[0-9]+]], vcc, 8, [[BASE]]
; GFX9-DAG: v_add{{(_co)?}}_u32_e32 [[B2:v[0-9]+]], vcc, 0x4008, [[BASE]]
; GFX9-DAG: v_add{{(_co)?}}_u32_e32 [[B3:v[0-9]+]], vcc, 0x8008, [[BASE]]
; VI-DAG: v_add_u32_e32 [[B1:v[0-9]+]], vcc, 8, [[BASE]]
; VI-DAG: v_add_u32_e32 [[B2:v[0-9]+]], vcc, 0x4008, [[BASE]]
; VI-DAG: v_add_u32_e32 [[B3:v[0-9]+]], vcc, 0x8008, [[BASE]]
; GFX9-DAG: v_add_u32_e32 [[B1:v[0-9]+]], 8, [[BASE]]
; GFX9-DAG: v_add_u32_e32 [[B2:v[0-9]+]], 0x4008, [[BASE]]
; GFX9-DAG: v_add_u32_e32 [[B3:v[0-9]+]], 0x8008, [[BASE]]
; GCN-DAG: ds_read2st64_b64 v[{{[0-9]+:[0-9]+}}], [[B1]] offset1:16
; GCN-DAG: ds_read2st64_b64 v[{{[0-9]+:[0-9]+}}], [[B2]] offset1:16
; GCN-DAG: ds_read2st64_b64 v[{{[0-9]+:[0-9]+}}], [[B3]] offset1:16
@ -234,12 +248,15 @@ bb:
; GCN-LABEL: ds_write32_combine_stride_400:
; GCN: s_load_dword [[ARG:s[0-9]+]], s[4:5], 0x0
; GCN: v_mov_b32_e32 [[BASE:v[0-9]+]], [[ARG]]
; GCN-DAG: v_add{{(_co)?}}_u32_e32 [[B1:v[0-9]+]], vcc, 0x320, [[BASE]]
; GCN-DAG: v_add{{(_co)?}}_u32_e32 [[B2:v[0-9]+]], vcc, 0x640, [[BASE]]
; GCN-DAG: v_add{{(_co)?}}_u32_e32 [[B3:v[0-9]+]], vcc, 0x960, [[BASE]]
; GFX9-DAG: v_add{{(_co)?}}_u32_e32 [[B1:v[0-9]+]], vcc, 0x320, [[BASE]]
; GFX9-DAG: v_add{{(_co)?}}_u32_e32 [[B2:v[0-9]+]], vcc, 0x640, [[BASE]]
; GFX9-DAG: v_add{{(_co)?}}_u32_e32 [[B3:v[0-9]+]], vcc, 0x960, [[BASE]]
; VI-DAG: v_add_u32_e32 [[B1:v[0-9]+]], vcc, 0x320, [[BASE]]
; VI-DAG: v_add_u32_e32 [[B2:v[0-9]+]], vcc, 0x640, [[BASE]]
; VI-DAG: v_add_u32_e32 [[B3:v[0-9]+]], vcc, 0x960, [[BASE]]
; GFX9-DAG: v_add_u32_e32 [[B1:v[0-9]+]], 0x320, [[BASE]]
; GFX9-DAG: v_add_u32_e32 [[B2:v[0-9]+]], 0x640, [[BASE]]
; GFX9-DAG: v_add_u32_e32 [[B3:v[0-9]+]], 0x960, [[BASE]]
; GCN-DAG: ds_write2_b32 [[BASE]], v{{[0-9]+}}, v{{[0-9]+}} offset1:100
; GCN-DAG: ds_write2_b32 [[B1]], v{{[0-9]+}}, v{{[0-9]+}} offset1:100
; GCN-DAG: ds_write2_b32 [[B2]], v{{[0-9]+}}, v{{[0-9]+}} offset1:100
@ -267,12 +284,15 @@ bb:
; GCN-LABEL: ds_write32_combine_stride_400_back:
; GCN: s_load_dword [[ARG:s[0-9]+]], s[4:5], 0x0
; GCN: v_mov_b32_e32 [[BASE:v[0-9]+]], [[ARG]]
; GCN-DAG: v_add{{(_co)?}}_u32_e32 [[B1:v[0-9]+]], vcc, 0x320, [[BASE]]
; GCN-DAG: v_add{{(_co)?}}_u32_e32 [[B2:v[0-9]+]], vcc, 0x640, [[BASE]]
; GCN-DAG: v_add{{(_co)?}}_u32_e32 [[B3:v[0-9]+]], vcc, 0x960, [[BASE]]
; GFX9-DAG: v_add{{(_co)?}}_u32_e32 [[B1:v[0-9]+]], vcc, 0x320, [[BASE]]
; GFX9-DAG: v_add{{(_co)?}}_u32_e32 [[B2:v[0-9]+]], vcc, 0x640, [[BASE]]
; GFX9-DAG: v_add{{(_co)?}}_u32_e32 [[B3:v[0-9]+]], vcc, 0x960, [[BASE]]
; VI-DAG: v_add_u32_e32 [[B1:v[0-9]+]], vcc, 0x320, [[BASE]]
; VI-DAG: v_add_u32_e32 [[B2:v[0-9]+]], vcc, 0x640, [[BASE]]
; VI-DAG: v_add_u32_e32 [[B3:v[0-9]+]], vcc, 0x960, [[BASE]]
; GFX9-DAG: v_add_u32_e32 [[B1:v[0-9]+]], 0x320, [[BASE]]
; GFX9-DAG: v_add_u32_e32 [[B2:v[0-9]+]], 0x640, [[BASE]]
; GFX9-DAG: v_add_u32_e32 [[B3:v[0-9]+]], 0x960, [[BASE]]
; GCN-DAG: ds_write2_b32 [[BASE]], v{{[0-9]+}}, v{{[0-9]+}} offset1:100
; GCN-DAG: ds_write2_b32 [[B1]], v{{[0-9]+}}, v{{[0-9]+}} offset1:100
; GCN-DAG: ds_write2_b32 [[B2]], v{{[0-9]+}}, v{{[0-9]+}} offset1:100
@ -327,12 +347,15 @@ bb:
; GCN-LABEL: ds_write32_combine_stride_8192_shifted:
; GCN: s_load_dword [[ARG:s[0-9]+]], s[4:5], 0x0
; GCN: v_mov_b32_e32 [[BASE:v[0-9]+]], [[ARG]]
; GCN-DAG: v_add{{(_co)?}}_u32_e32 [[B1:v[0-9]+]], vcc, 4, [[BASE]]
; GCN-DAG: v_add{{(_co)?}}_u32_e32 [[B2:v[0-9]+]], vcc, 0x4004, [[BASE]]
; GCN-DAG: v_add{{(_co)?}}_u32_e32 [[B3:v[0-9]+]], vcc, 0x8004, [[BASE]]
; GFX9-DAG: v_add{{(_co)?}}_u32_e32 [[B1:v[0-9]+]], vcc, 4, [[BASE]]
; GFX9-DAG: v_add{{(_co)?}}_u32_e32 [[B2:v[0-9]+]], vcc, 0x4004, [[BASE]]
; GFX9-DAG: v_add{{(_co)?}}_u32_e32 [[B3:v[0-9]+]], vcc, 0x8004, [[BASE]]
; VI-DAG: v_add_u32_e32 [[B1:v[0-9]+]], vcc, 4, [[BASE]]
; VI-DAG: v_add_u32_e32 [[B2:v[0-9]+]], vcc, 0x4004, [[BASE]]
; VI-DAG: v_add_u32_e32 [[B3:v[0-9]+]], vcc, 0x8004, [[BASE]]
; GFX9-DAG: v_add_u32_e32 [[B1:v[0-9]+]], 4, [[BASE]]
; GFX9-DAG: v_add_u32_e32 [[B2:v[0-9]+]], 0x4004, [[BASE]]
; GFX9-DAG: v_add_u32_e32 [[B3:v[0-9]+]], 0x8004, [[BASE]]
; GCN-DAG: ds_write2st64_b32 [[B1]], v{{[0-9]+}}, v{{[0-9]+}} offset1:32
; GCN-DAG: ds_write2st64_b32 [[B2]], v{{[0-9]+}}, v{{[0-9]+}} offset1:32
; GCN-DAG: ds_write2st64_b32 [[B3]], v{{[0-9]+}}, v{{[0-9]+}} offset1:32
@ -356,8 +379,10 @@ bb:
; GCN-LABEL: ds_write64_combine_stride_400:
; GCN: s_load_dword [[ARG:s[0-9]+]], s[4:5], 0x0
; GCN: v_mov_b32_e32 [[BASE:v[0-9]+]], [[ARG]]
; GCN-DAG: v_add{{(_co)?}}_u32_e32 [[B1:v[0-9]+]], vcc, 0x960, [[BASE]]
; GFX9-DAG: v_add{{(_co)?}}_u32_e32 [[B1:v[0-9]+]], vcc, 0x960, [[BASE]]
; VI-DAG: v_add_u32_e32 [[B1:v[0-9]+]], vcc, 0x960, [[BASE]]
; GFX9-DAG: v_add_u32_e32 [[B1:v[0-9]+]], 0x960, [[BASE]]
; GCN-DAG: ds_write2_b64 [[BASE]], v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] offset1:50
; GCN-DAG: ds_write2_b64 [[BASE]], v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] offset0:100 offset1:150
; GCN-DAG: ds_write2_b64 [[BASE]], v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] offset0:200 offset1:250
@ -385,12 +410,15 @@ bb:
; GCN-LABEL: ds_write64_combine_stride_8192_shifted:
; GCN: s_load_dword [[ARG:s[0-9]+]], s[4:5], 0x0
; GCN: v_mov_b32_e32 [[BASE:v[0-9]+]], [[ARG]]
; GCN-DAG: v_add{{(_co)?}}_u32_e32 [[B1:v[0-9]+]], vcc, 8, [[BASE]]
; GCN-DAG: v_add{{(_co)?}}_u32_e32 [[B2:v[0-9]+]], vcc, 0x4008, [[BASE]]
; GCN-DAG: v_add{{(_co)?}}_u32_e32 [[B3:v[0-9]+]], vcc, 0x8008, [[BASE]]
; GFX9-DAG: v_add{{(_co)?}}_u32_e32 [[B1:v[0-9]+]], vcc, 8, [[BASE]]
; GFX9-DAG: v_add{{(_co)?}}_u32_e32 [[B2:v[0-9]+]], vcc, 0x4008, [[BASE]]
; GFX9-DAG: v_add{{(_co)?}}_u32_e32 [[B3:v[0-9]+]], vcc, 0x8008, [[BASE]]
; VI-DAG: v_add_u32_e32 [[B1:v[0-9]+]], vcc, 8, [[BASE]]
; VI-DAG: v_add_u32_e32 [[B2:v[0-9]+]], vcc, 0x4008, [[BASE]]
; VI-DAG: v_add_u32_e32 [[B3:v[0-9]+]], vcc, 0x8008, [[BASE]]
; GFX9-DAG: v_add_u32_e32 [[B1:v[0-9]+]], 8, [[BASE]]
; GFX9-DAG: v_add_u32_e32 [[B2:v[0-9]+]], 0x4008, [[BASE]]
; GFX9-DAG: v_add_u32_e32 [[B3:v[0-9]+]], 0x8008, [[BASE]]
; GCN-DAG: ds_write2st64_b64 [[B1]], v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] offset1:16
; GCN-DAG: ds_write2st64_b64 [[B2]], v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] offset1:16
; GCN-DAG: ds_write2st64_b64 [[B3]], v[{{[0-9]+:[0-9]+}}], v[{{[0-9]+:[0-9]+}}] offset1:16

View File

@ -1,4 +1,5 @@
; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI %s
; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CI %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9 %s
declare i32 @llvm.amdgcn.workitem.id.x() #0
@ -6,7 +7,8 @@ declare i32 @llvm.amdgcn.workitem.id.x() #0
; GCN-LABEL: {{^}}write_ds_sub0_offset0_global:
; GCN: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 2, v0
; GCN: v_sub_i32_e32 [[BASEPTR:v[0-9]+]], vcc, 0, [[SHL]]
; CI: v_sub_i32_e32 [[BASEPTR:v[0-9]+]], vcc, 0, [[SHL]]
; GFX9: v_sub_u32_e32 [[BASEPTR:v[0-9]+]], 0, [[SHL]]
; GCN: v_mov_b32_e32 [[VAL:v[0-9]+]], 0x7b
; GCN: ds_write_b32 [[BASEPTR]], [[VAL]] offset:12
define amdgpu_kernel void @write_ds_sub0_offset0_global() #0 {
@ -21,7 +23,8 @@ entry:
; GCN-LABEL: {{^}}add_x_shl_neg_to_sub_max_offset:
; GCN-DAG: v_lshlrev_b32_e32 [[SCALED:v[0-9]+]], 2, v0
; GCN-DAG: v_sub_i32_e32 [[NEG:v[0-9]+]], vcc, 0, [[SCALED]]
; CI-DAG: v_sub_i32_e32 [[NEG:v[0-9]+]], vcc, 0, [[SCALED]]
; GFX9-DAG: v_sub_u32_e32 [[NEG:v[0-9]+]], 0, [[SCALED]]
; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 13
; GCN: ds_write_b8 [[NEG]], [[K]] offset:65535
define amdgpu_kernel void @add_x_shl_neg_to_sub_max_offset() #1 {
@ -36,7 +39,8 @@ define amdgpu_kernel void @add_x_shl_neg_to_sub_max_offset() #1 {
; GCN-LABEL: {{^}}add_x_shl_neg_to_sub_max_offset_p1:
; GCN-DAG: v_lshlrev_b32_e32 [[SCALED:v[0-9]+]], 2, v0
; GCN-DAG: v_sub_i32_e32 [[NEG:v[0-9]+]], vcc, 0x10000, [[SCALED]]
; CI-DAG: v_sub_i32_e32 [[NEG:v[0-9]+]], vcc, 0x10000, [[SCALED]]
; GFX9-DAG: v_sub_u32_e32 [[NEG:v[0-9]+]], 0x10000, [[SCALED]]
; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 13
; GCN: ds_write_b8 [[NEG]], [[K]]{{$}}
define amdgpu_kernel void @add_x_shl_neg_to_sub_max_offset_p1() #1 {
@ -51,7 +55,8 @@ define amdgpu_kernel void @add_x_shl_neg_to_sub_max_offset_p1() #1 {
; GCN-LABEL: {{^}}add_x_shl_neg_to_sub_multi_use:
; GCN-DAG: v_lshlrev_b32_e32 [[SCALED:v[0-9]+]], 2, v0
; GCN-DAG: v_sub_i32_e32 [[NEG:v[0-9]+]], vcc, 0, [[SCALED]]
; CI-DAG: v_sub_i32_e32 [[NEG:v[0-9]+]], vcc, 0, [[SCALED]]
; GFX9-DAG: v_sub_u32_e32 [[NEG:v[0-9]+]], 0, [[SCALED]]
; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 13
; GCN-NOT: v_sub
; GCN: ds_write_b32 [[NEG]], [[K]] offset:123{{$}}
@ -73,7 +78,8 @@ define amdgpu_kernel void @add_x_shl_neg_to_sub_multi_use() #1 {
; GCN-LABEL: {{^}}add_x_shl_neg_to_sub_multi_use_same_offset:
; GCN-DAG: v_lshlrev_b32_e32 [[SCALED:v[0-9]+]], 2, v0
; GCN-DAG: v_sub_i32_e32 [[NEG:v[0-9]+]], vcc, 0, [[SCALED]]
; CI-DAG: v_sub_i32_e32 [[NEG:v[0-9]+]], vcc, 0, [[SCALED]]
; GFX9-DAG: v_sub_u32_e32 [[NEG:v[0-9]+]], 0, [[SCALED]]
; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 13
; GCN-NOT: v_sub
; GCN: ds_write_b32 [[NEG]], [[K]] offset:123{{$}}
@ -93,7 +99,8 @@ define amdgpu_kernel void @add_x_shl_neg_to_sub_multi_use_same_offset() #1 {
; GCN-LABEL: {{^}}add_x_shl_neg_to_sub_misaligned_i64_max_offset:
; GCN-DAG: v_lshlrev_b32_e32 [[SCALED:v[0-9]+]], 2, v0
; GCN-DAG: v_sub_i32_e32 [[NEG:v[0-9]+]], vcc, 0, [[SCALED]]
; CI-DAG: v_sub_i32_e32 [[NEG:v[0-9]+]], vcc, 0, [[SCALED]]
; GFX9-DAG: v_sub_u32_e32 [[NEG:v[0-9]+]], 0, [[SCALED]]
; GCN: ds_write2_b32 [[NEG]], {{v[0-9]+}}, {{v[0-9]+}} offset0:254 offset1:255
define amdgpu_kernel void @add_x_shl_neg_to_sub_misaligned_i64_max_offset() #1 {
%x.i = call i32 @llvm.amdgcn.workitem.id.x() #0
@ -107,7 +114,8 @@ define amdgpu_kernel void @add_x_shl_neg_to_sub_misaligned_i64_max_offset() #1 {
; GCN-LABEL: {{^}}add_x_shl_neg_to_sub_misaligned_i64_max_offset_p1:
; GCN-DAG: v_lshlrev_b32_e32 [[SCALED:v[0-9]+]], 2, v0
; GCN-DAG: v_sub_i32_e32 [[NEG:v[0-9]+]], vcc, 0x3fc, [[SCALED]]
; CI-DAG: v_sub_i32_e32 [[NEG:v[0-9]+]], vcc, 0x3fc, [[SCALED]]
; GFX9-DAG: v_sub_u32_e32 [[NEG:v[0-9]+]], 0x3fc, [[SCALED]]
; GCN: ds_write2_b32 [[NEG]], {{v[0-9]+}}, {{v[0-9]+}} offset1:1{{$}}
define amdgpu_kernel void @add_x_shl_neg_to_sub_misaligned_i64_max_offset_p1() #1 {
%x.i = call i32 @llvm.amdgcn.workitem.id.x() #0

View File

@ -1,5 +1,5 @@
; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt < %s | FileCheck -strict-whitespace -check-prefixes=GCN,CI %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -mattr=+load-store-opt < %s | FileCheck -strict-whitespace -check-prefixes=GCN,GFX9 %s
; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt < %s | FileCheck -enable-var-scope -strict-whitespace -check-prefixes=GCN,CI %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -mattr=+load-store-opt,+flat-for-global < %s | FileCheck -enable-var-scope -strict-whitespace -check-prefixes=GCN,GFX9 %s
; FIXME: We don't get cases where the address was an SGPR because we
; get a copy to the address register for each one.
@ -617,8 +617,9 @@ declare i32 @llvm.amdgcn.workgroup.id.x() #1
declare i32 @llvm.amdgcn.workgroup.id.y() #1
declare i32 @llvm.amdgcn.workitem.id.x() #1
declare i32 @llvm.amdgcn.workitem.id.y() #1
declare void @llvm.amdgcn.s.barrier() #2
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
attributes #1 = { nounwind readnone speculatable }
attributes #2 = { convergent nounwind }

View File

@ -1,5 +1,5 @@
; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt < %s | FileCheck -check-prefixes=GCN,CI %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -mattr=+load-store-opt < %s | FileCheck -check-prefixes=GCN,GFX9 %s
; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CI %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -mattr=+load-store-opt < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9 %s
@lds = addrspace(3) global [512 x float] undef, align 4
@lds.f64 = addrspace(3) global [512 x double] undef, align 8
@ -78,7 +78,7 @@ define amdgpu_kernel void @simple_read2st64_f32_max_offset(float addrspace(1)* %
; GFX9-NOT: m0
; GCN-NOT: ds_read2st64_b32
; GCN-DAG: v_add{{(_co)?}}_{{i|u}}32_e32 [[BIGADD:v[0-9]+]], vcc, 0x10000, {{v[0-9]+}}
; GCN-DAG: v_add_{{i|u}}32_e32 [[BIGADD:v[0-9]+]], {{(vcc, )?}}0x10000, {{v[0-9]+}}
; GCN-DAG: ds_read_b32 {{v[0-9]+}}, {{v[0-9]+}} offset:256
; GCN-DAG: ds_read_b32 {{v[0-9]+}}, [[BIGADD]]{{$}}
; GCN: s_endpgm
@ -234,7 +234,7 @@ define amdgpu_kernel void @simple_read2st64_f64_max_offset(double addrspace(1)*
; GCN-NOT: ds_read2st64_b64
; GCN-DAG: ds_read_b64 {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}} offset:512
; GCN-DAG: v_add_{{(co_)?}}{{i|u}}32_e32 [[BIGADD:v[0-9]+]], vcc, 0x10000, {{v[0-9]+}}
; GCN-DAG: v_add_{{i|u}}32_e32 [[BIGADD:v[0-9]+]], {{(vcc, )?}}0x10000, {{v[0-9]+}}
; GCN: ds_read_b64 {{v\[[0-9]+:[0-9]+\]}}, [[BIGADD]]
; GCN: s_endpgm
define amdgpu_kernel void @simple_read2st64_f64_over_max_offset(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {

View File

@ -1,15 +1,14 @@
; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt < %s | FileCheck -strict-whitespace -check-prefixes=GCN,CI %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -mattr=+load-store-opt < %s | FileCheck -strict-whitespace -check-prefixes=GCN,GFX9 %s
; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt < %s | FileCheck -enable-var-scope -strict-whitespace -check-prefixes=GCN,CI %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -mattr=+load-store-opt,+flat-for-global < %s | FileCheck -enable-var-scope -strict-whitespace -check-prefixes=GCN,GFX9 %s
@lds = addrspace(3) global [512 x float] undef, align 4
@lds.f64 = addrspace(3) global [512 x double] undef, align 8
; GCN-LABEL: {{^}}simple_write2_one_val_f32:
; CI-DAG: s_mov_b32 m0
; GFX9-NOT: m0
; GCN-DAG: {{buffer|global}}_load_dword [[VAL:v[0-9]+]]
; GCN-DAG: {{buffer|flat|global}}_load_dword [[VAL:v[0-9]+]]
; GCN-DAG: v_lshlrev_b32_e32 [[VPTR:v[0-9]+]], 2, v{{[0-9]+}}
; GCN: ds_write2_b32 [[VPTR]], [[VAL]], [[VAL]] offset1:8
; GCN: s_endpgm
@ -108,6 +107,7 @@ define amdgpu_kernel void @simple_write2_two_val_f32_volatile_1(float addrspace(
; GFX9: global_load_dwordx2 v{{\[}}[[VAL0:[0-9]+]]:{{[0-9]+\]}}
; GFX9: global_load_dwordx2 v{{\[[0-9]+}}:[[VAL1:[0-9]+]]{{\]}}
; GCN: ds_write2_b32 [[VPTR]], v[[VAL0]], v[[VAL1]] offset1:8
; GCN: s_endpgm
define amdgpu_kernel void @simple_write2_two_val_subreg2_mixed_f32(float addrspace(1)* %C, <2 x float> addrspace(1)* %in) #0 {
@ -502,5 +502,5 @@ declare i32 @llvm.amdgcn.workitem.id.x() #1
declare i32 @llvm.amdgcn.workitem.id.y() #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
attributes #1 = { nounwind readnone speculatable }
attributes #2 = { convergent nounwind }

View File

@ -63,7 +63,7 @@ define amdgpu_kernel void @simple_write2st64_two_val_f32_2_5(float addrspace(1)*
; GFX9-DAG: global_load_dword [[VAL1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, off offset:4
; GCN-DAG: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 2, v{{[0-9]+}}
; GCN: v_add{{(_co)?}}_{{i|u}}32_e32 [[VPTR:v[0-9]+]], vcc, s{{[0-9]+}}, [[SHL]]
; GCN: v_add_{{i|u}}32_e32 [[VPTR:v[0-9]+]], {{(vcc, )?}}s{{[0-9]+}}, [[SHL]]
; GCN: ds_write2st64_b32 [[VPTR]], [[VAL0]], [[VAL1]] offset1:255
; GCN: s_endpgm
define amdgpu_kernel void @simple_write2st64_two_val_max_offset_f32(float addrspace(1)* %C, float addrspace(1)* %in, float addrspace(3)* %lds) #0 {
@ -91,7 +91,7 @@ define amdgpu_kernel void @simple_write2st64_two_val_max_offset_f32(float addrsp
; GFX9-DAG: global_load_dwordx2 [[VAL1:v\[[0-9]+:[0-9]+\]]], {{v\[[0-9]+:[0-9]+\]}}, off offset:8
; GCN-DAG: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 3, v{{[0-9]+}}
; GCN: v_add_{{(co_)?}}{{i|u}}32_e32 [[VPTR:v[0-9]+]], vcc, s{{[0-9]+}}, [[SHL]]
; GCN: v_add_{{i|u}}32_e32 [[VPTR:v[0-9]+]], {{(vcc, )?}}s{{[0-9]+}}, [[SHL]]
; GCN: ds_write2st64_b64 [[VPTR]], [[VAL0]], [[VAL1]] offset0:4 offset1:127
; GCN: s_endpgm
define amdgpu_kernel void @simple_write2st64_two_val_max_offset_f64(double addrspace(1)* %C, double addrspace(1)* %in, double addrspace(3)* %lds) #0 {

View File

@ -1,6 +1,6 @@
; RUN: llc -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=CI %s
; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=VI %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=VI %s
; RUN: llc -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CI,CIVI %s
; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI,CIVI,GFX89 %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI,GFX89 %s
; GCN-LABEL: {{^}}void_func_i1:
; GCN: v_and_b32_e32 v0, 1, v0
@ -24,7 +24,7 @@ define void @void_func_i1_zeroext(i1 zeroext %arg0) #0 {
; GCN-LABEL: {{^}}void_func_i1_signext:
; GCN: s_waitcnt
; GCN-NEXT: v_add{{(_co)?}}_{{i|u}}32_e32 v0, vcc, 12, v0
; GCN-NEXT: v_add_{{i|u}}32_e32 v0, {{(vcc, )?}}12, v0
; GCN-NOT: v0
; GCN: buffer_store_dword v0, off
define void @void_func_i1_signext(i1 signext %arg0) #0 {
@ -60,7 +60,7 @@ define void @void_func_i8(i8 %arg0) #0 {
; GCN-LABEL: {{^}}void_func_i8_zeroext:
; GCN-NOT: and_b32
; GCN: v_add{{(_co)?}}_{{i|u}}32_e32 v0, vcc, 12, v0
; GCN: v_add_{{i|u}}32_e32 v0, {{(vcc, )?}}12, v0
define void @void_func_i8_zeroext(i8 zeroext %arg0) #0 {
%ext = zext i8 %arg0 to i32
%add = add i32 %ext, 12
@ -70,7 +70,7 @@ define void @void_func_i8_zeroext(i8 zeroext %arg0) #0 {
; GCN-LABEL: {{^}}void_func_i8_signext:
; GCN-NOT: v_bfe_i32
; GCN: v_add{{(_co)?}}_{{i|u}}32_e32 v0, vcc, 12, v0
; GCN: v_add_{{i|u}}32_e32 v0, {{(vcc, )?}}12, v0
define void @void_func_i8_signext(i8 signext %arg0) #0 {
%ext = sext i8 %arg0 to i32
%add = add i32 %ext, 12
@ -87,7 +87,7 @@ define void @void_func_i16(i16 %arg0) #0 {
; GCN-LABEL: {{^}}void_func_i16_zeroext:
; GCN-NOT: v0
; GCN: v_add{{(_co)?}}_{{i|u}}32_e32 v0, vcc, 12, v0
; GCN: v_add_{{i|u}}32_e32 v0, {{(vcc, )?}}12, v0
define void @void_func_i16_zeroext(i16 zeroext %arg0) #0 {
%ext = zext i16 %arg0 to i32
%add = add i32 %ext, 12
@ -97,7 +97,7 @@ define void @void_func_i16_zeroext(i16 zeroext %arg0) #0 {
; GCN-LABEL: {{^}}void_func_i16_signext:
; GCN-NOT: v0
; GCN: v_add{{(_co)?}}_{{i|u}}32_e32 v0, vcc, 12, v0
; GCN: v_add_{{i|u}}32_e32 v0, {{(vcc, )?}}12, v0
define void @void_func_i16_signext(i16 signext %arg0) #0 {
%ext = sext i16 %arg0 to i32
%add = add i32 %ext, 12
@ -582,7 +582,7 @@ define void @void_func_v32i32_i32_i64(<32 x i32> %arg0, i32 %arg1, i64 %arg2) #0
; GCN: buffer_store_byte [[TRUNC_ARG1_I1]], off
; GCN: buffer_store_byte [[LOAD_ARG2]], off
; GCN: buffer_store_short [[LOAD_ARG3]], off
; VI: buffer_store_short [[LOAD_ARG4]], off
; GFX89: buffer_store_short [[LOAD_ARG4]], off
; CI: buffer_store_short [[CVT_ARG4]], off
define void @void_func_v32i32_i1_i8_i16(<32 x i32> %arg0, i1 %arg1, i8 %arg2, i16 %arg3, half %arg4) #0 {

View File

@ -51,7 +51,7 @@ main_body:
; GCN: s_bfm_b64 exec, s1, 0
; GCN: s_cmp_eq_u32 s1, 64
; GCN: s_cmov_b64 exec, -1
; GCN: v_add_co_u32_e32 v0, vcc, s0, v0
; GCN: v_add_u32_e32 v0, s0, v0
define amdgpu_ps float @reuse_input(i32 inreg %count, i32 %a) {
main_body:
call void @llvm.amdgcn.init.exec.from.input(i32 %count, i32 19)
@ -65,7 +65,7 @@ main_body:
; GCN: s_bfm_b64 exec, s1, 0
; GCN: s_cmp_eq_u32 s1, 64
; GCN: s_cmov_b64 exec, -1
; GCN: v_add_co_u32_e32 v0, vcc, s0, v0
; GCN: v_add_u32_e32 v0, s0, v0
define amdgpu_ps float @reuse_input2(i32 inreg %count, i32 %a) {
main_body:
%s = add i32 %a, %count

View File

@ -141,7 +141,7 @@ define amdgpu_kernel void @store_private_offset_i8_max_offset_plus2() #0 {
; SICIVI: v_add_{{i|u}}32_e32 [[ADDR1:v[0-9]+]], vcc, 32, [[ADDR0]]
; SICIVI: buffer_store_dword v{{[0-9]+}}, [[ADDR1]], s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}}
; GFX9: v_add_co_u32_e32 [[ADDR:v[0-9]+]], vcc, 4,
; GFX9: v_add_u32_e32 [[ADDR:v[0-9]+]], 4,
; GFX9: buffer_store_dword v{{[0-9]+}}, [[ADDR]], s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen offset:32
define amdgpu_kernel void @store_private_unknown_bits_vaddr() #0 {
%alloca = alloca [16 x i32], align 4

View File

@ -87,7 +87,7 @@ define amdgpu_kernel void @v_pack_v2f16(i32 addrspace(1)* %in0, i32 addrspace(1)
; GFX9: v_and_b32_e32 [[ELT0:v[0-9]+]], 0xffff, [[VAL0]]
; GFX9: v_lshl_or_b32 [[PACKED:v[0-9]+]], [[VAL1]], 16, [[ELT0]]
; GFX9: v_add{{(_co)?}}_{{i|u}}32_e32 v{{[0-9]+}}, vcc, 9, [[PACKED]]
; GFX9: v_add_u32_e32 v{{[0-9]+}}, 9, [[PACKED]]
define amdgpu_kernel void @v_pack_v2f16_user(i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64

View File

@ -81,7 +81,7 @@ define amdgpu_kernel void @v_pack_v2i16(i32 addrspace(1)* %in0, i32 addrspace(1)
; GFX9: v_and_b32_e32 [[MASKED:v[0-9]+]], 0xffff, [[VAL0]]
; GFX9: v_lshl_or_b32 [[PACKED:v[0-9]+]], [[VAL1]], 16, [[MASKED]]
; GFX9: v_add_co_u32_e32 v{{[0-9]+}}, vcc, 9, [[PACKED]]
; GFX9: v_add_u32_e32 v{{[0-9]+}}, 9, [[PACKED]]
define amdgpu_kernel void @v_pack_v2i16_user(i32 addrspace(1)* %in0, i32 addrspace(1)* %in1) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64

View File

@ -1,5 +1,6 @@
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tahiti -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tahiti -verify-machineinstrs< %s | FileCheck -check-prefixes=GCN,SICIVI,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefixes=GCN,SICIVI,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx900 -verify-machineinstrs< %s | FileCheck -check-prefixes=GCN,GFX9,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=cypress -verify-machineinstrs< %s
declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
@ -49,8 +50,11 @@ define amdgpu_kernel void @s_saddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)*
}
; FUNC-LABEL: {{^}}v_saddo_i64:
; SI: v_add_{{[iu]}}32
; SI: v_addc_u32
; SICIVI: v_add_{{[iu]}}32_e32 v{{[0-9]+}}, vcc
; SICIVI: v_addc_u32_e32 v{{[0-9]+}}, vcc
; GFX9: v_add_co_u32_e32 v{{[0-9]+}}, vcc
; GFX9: v_addc_co_u32_e32 v{{[0-9]+}}, vcc
define amdgpu_kernel void @v_saddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
%a = load i64, i64 addrspace(1)* %aptr, align 4
%b = load i64, i64 addrspace(1)* %bptr, align 4

View File

@ -1,5 +1,6 @@
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; The code generated by sdiv is long and complex and may frequently change.

View File

@ -7,7 +7,8 @@
; NOSDWA: v_add_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v[[DST]]
; NOSDWA-NOT: v_add_{{(_co)?}}_u32_sdwa
; SDWA: v_add{{(_co)?}}_u32_sdwa v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI: v_add_u32_sdwa v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9: v_add_u32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
define amdgpu_kernel void @add_shr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%a = load i32, i32 addrspace(1)* %in, align 4
@ -22,8 +23,8 @@ define amdgpu_kernel void @add_shr_i32(i32 addrspace(1)* %out, i32 addrspace(1)*
; NOSDWA: v_subrev_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v[[DST]]
; NOSDWA-NOT: v_subrev_{{(_co)?}}_u32_sdwa
; SDWA: v_subrev{{(_co)?}}_u32_sdwa v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI: v_subrev_u32_sdwa v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9: v_sub_u32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
define amdgpu_kernel void @sub_shr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%a = load i32, i32 addrspace(1)* %in, align 4
%shr = lshr i32 %a, 16

View File

@ -1,11 +1,13 @@
; RUN: llc -mtriple=amdgcn-amd-amdhsa-amdgiz -mcpu=fiji -mattr=-flat-for-global -enable-ipra=0 -amdgpu-sroa=0 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI,MESA %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa-amdgiz -mcpu=hawaii -enable-ipra=0 -amdgpu-sroa=0 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CI,MESA %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa-amdgiz -mcpu=gfx900 -mattr=-flat-for-global -enable-ipra=0 -amdgpu-sroa=0 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9,VI,MESA %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa-amdgiz -mcpu=fiji -mattr=-flat-for-global -enable-ipra=0 -amdgpu-sroa=0 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI,CIVI,MESA %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa-amdgiz -mcpu=hawaii -enable-ipra=0 -amdgpu-sroa=0 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CI,CIVI,MESA %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa-amdgiz -mcpu=gfx900 -mattr=-flat-for-global -enable-ipra=0 -amdgpu-sroa=0 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9,MESA %s
target datalayout = "A5"
; FIXME: Why is this commuted only sometimes?
; GCN-LABEL: {{^}}i32_fastcc_i32_i32:
; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_add_{{[_coiu]*}}32_e32 v0, vcc, v1, v0
; CIVI-NEXT: v_add_{{i|u}}32_e32 v0, vcc, v1, v0
; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
; GCN-NEXT: s_setpc_b64
define fastcc i32 @i32_fastcc_i32_i32(i32 %arg0, i32 %arg1) #1 {
%add0 = add i32 %arg0, %arg1
@ -14,7 +16,8 @@ define fastcc i32 @i32_fastcc_i32_i32(i32 %arg0, i32 %arg1) #1 {
; GCN-LABEL: {{^}}i32_fastcc_i32_i32_stack_object:
; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN: v_add_{{[_coiu]*}}32_e32 v0, vcc, v1, v
; CIVI-NEXT: v_add_{{i|u}}32_e32 v0, vcc, v1, v0
; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
; GCN: s_mov_b32 s5, s32
; GCN: buffer_store_dword v{{[0-9]+}}, off, s[0:3], s5 offset:24
; GCN: s_waitcnt vmcnt(0)
@ -84,7 +87,10 @@ entry:
; GCN-NEXT: s_mov_b32 s5, s32
; GCN-NEXT: buffer_load_dword v1, off, s[0:3], s5 offset:4
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_add_{{[_coiu]*}}32_e32 v0, vcc, v1, v0
; CIVI-NEXT: v_add_{{i|u}}32_e32 v0, vcc, v1, v0
; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
define fastcc i32 @i32_fastcc_i32_byval_i32(i32 %arg0, i32 addrspace(5)* byval align 4 %arg1) #1 {
%arg1.load = load i32, i32 addrspace(5)* %arg1, align 4
@ -123,9 +129,16 @@ entry:
; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-DAG: buffer_load_dword [[LOAD_0:v[0-9]+]], off, s[0:3], s5 offset:4
; GCN-DAG: buffer_load_dword [[LOAD_1:v[0-9]+]], off, s[0:3], s5 offset:8
; GCN-DAG: v_add_{{[_coiu]*}}32_e32 v0, vcc, v1, v0
; GCN: v_add_{{[_coiu]*}}32_e32 v0, vcc, [[LOAD_0]], v0
; GCN: v_add_{{[_coiu]*}}32_e32 v0, vcc, [[LOAD_1]], v0
; CIVI-NEXT: v_add_{{i|u}}32_e32 v0, vcc, v1, v0
; CIVI: v_add_{{i|u}}32_e32 v0, vcc, [[LOAD_0]], v0
; CIVI: v_add_{{i|u}}32_e32 v0, vcc, [[LOAD_1]], v0
; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
; GFX9: v_add_u32_e32 v0, v0, [[LOAD_0]]
; GFX9: v_add_u32_e32 v0, v0, [[LOAD_1]]
; GCN-NEXT: s_setpc_b64
define fastcc i32 @i32_fastcc_i32_i32_a32i32(i32 %arg0, i32 %arg1, [32 x i32] %large) #1 {
%val_firststack = extractvalue [32 x i32] %large, 30

View File

@ -1,6 +1,7 @@
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SIVI,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SIVI,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefixes=EG,FUNC %s
; FUNC-LABEL: {{^}}s_abs_i32:
; GCN: s_abs_i32
@ -17,9 +18,13 @@ define amdgpu_kernel void @s_abs_i32(i32 addrspace(1)* %out, i32 %val) nounwind
}
; FUNC-LABEL: {{^}}v_abs_i32:
; GCN: v_sub_{{[iu]}}32_e32 [[NEG:v[0-9]+]], vcc, 0, [[SRC:v[0-9]+]]
; SIVI: v_sub_{{i|u}}32_e32 [[NEG:v[0-9]+]], vcc, 0, [[SRC:v[0-9]+]]
; GFX9: v_sub_u32_e32 [[NEG:v[0-9]+]], 0, [[SRC:v[0-9]+]]
; GCN: v_max_i32_e32 {{v[0-9]+}}, [[SRC]], [[NEG]]
; GCN: v_add_{{[iu]}}32
; SIVI: v_add_{{i|u}}32_e32 v{{[0-9]+}}, vcc
; GFX9: v_add_u32_e32 v{{[0-9]+}}, 2
; EG: MAX_INT
define amdgpu_kernel void @v_abs_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %src) nounwind {
@ -33,7 +38,8 @@ define amdgpu_kernel void @v_abs_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %
}
; GCN-LABEL: {{^}}v_abs_i32_repeat_user:
; GCN: v_sub_{{[iu]}}32_e32 [[NEG:v[0-9]+]], vcc, 0, [[SRC:v[0-9]+]]
; SIVI: v_sub_{{i|u}}32_e32 [[NEG:v[0-9]+]], vcc, 0, [[SRC:v[0-9]+]]
; GFX9: v_sub_u32_e32 [[NEG:v[0-9]+]], 0, [[SRC:v[0-9]+]]
; GCN: v_max_i32_e32 [[MAX:v[0-9]+]], [[SRC]], [[NEG]]
; GCN: v_mul_lo_i32 v{{[0-9]+}}, [[MAX]], [[MAX]]
define amdgpu_kernel void @v_abs_i32_repeat_user(i32 addrspace(1)* %out, i32 addrspace(1)* %src) nounwind {
@ -68,14 +74,20 @@ define amdgpu_kernel void @s_abs_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %
}
; FUNC-LABEL: {{^}}v_abs_v2i32:
; GCN-DAG: v_sub_{{[iu]}}32_e32 [[NEG0:v[0-9]+]], vcc, 0, [[SRC0:v[0-9]+]]
; GCN-DAG: v_sub_{{[iu]}}32_e32 [[NEG1:v[0-9]+]], vcc, 0, [[SRC1:v[0-9]+]]
; SIVI-DAG: v_sub_{{i|u}}32_e32 [[NEG0:v[0-9]+]], vcc, 0, [[SRC0:v[0-9]+]]
; SIVI-DAG: v_sub_{{i|u}}32_e32 [[NEG1:v[0-9]+]], vcc, 0, [[SRC1:v[0-9]+]]
; GFX9-DAG: v_sub_u32_e32 [[NEG0:v[0-9]+]], 0, [[SRC0:v[0-9]+]]
; GFX9-DAG: v_sub_u32_e32 [[NEG1:v[0-9]+]], 0, [[SRC1:v[0-9]+]]
; GCN-DAG: v_max_i32_e32 {{v[0-9]+}}, [[SRC0]], [[NEG0]]
; GCN-DAG: v_max_i32_e32 {{v[0-9]+}}, [[SRC1]], [[NEG1]]
; GCN: v_add_{{[iu]}}32
; GCN: v_add_{{[iu]}}32
; SIVI: v_add_{{i|u}}32_e32 v{{[0-9]+}}, vcc
; SIVI: v_add_{{i|u}}32_e32 v{{[0-9]+}}, vcc
; GFX9: v_add_u32_e32 v{{[0-9]+}}, 2,
; GFX9: v_add_u32_e32 v{{[0-9]+}}, 2,
; EG: MAX_INT
; EG: MAX_INT
@ -127,20 +139,31 @@ define amdgpu_kernel void @s_abs_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %
}
; FUNC-LABEL: {{^}}v_abs_v4i32:
; GCN-DAG: v_sub_{{[iu]}}32_e32 [[NEG0:v[0-9]+]], vcc, 0, [[SRC0:v[0-9]+]]
; GCN-DAG: v_sub_{{[iu]}}32_e32 [[NEG1:v[0-9]+]], vcc, 0, [[SRC1:v[0-9]+]]
; GCN-DAG: v_sub_{{[iu]}}32_e32 [[NEG2:v[0-9]+]], vcc, 0, [[SRC2:v[0-9]+]]
; GCN-DAG: v_sub_{{[iu]}}32_e32 [[NEG3:v[0-9]+]], vcc, 0, [[SRC3:v[0-9]+]]
; SIVI-DAG: v_sub_{{i|u}}32_e32 [[NEG0:v[0-9]+]], vcc, 0, [[SRC0:v[0-9]+]]
; SIVI-DAG: v_sub_{{i|u}}32_e32 [[NEG1:v[0-9]+]], vcc, 0, [[SRC1:v[0-9]+]]
; SIVI-DAG: v_sub_{{i|u}}32_e32 [[NEG2:v[0-9]+]], vcc, 0, [[SRC2:v[0-9]+]]
; SIVI-DAG: v_sub_{{i|u}}32_e32 [[NEG3:v[0-9]+]], vcc, 0, [[SRC3:v[0-9]+]]
; GFX9-DAG: v_sub_u32_e32 [[NEG0:v[0-9]+]], 0, [[SRC0:v[0-9]+]]
; GFX9-DAG: v_sub_u32_e32 [[NEG1:v[0-9]+]], 0, [[SRC1:v[0-9]+]]
; GFX9-DAG: v_sub_u32_e32 [[NEG2:v[0-9]+]], 0, [[SRC2:v[0-9]+]]
; GFX9-DAG: v_sub_u32_e32 [[NEG3:v[0-9]+]], 0, [[SRC3:v[0-9]+]]
; GCN-DAG: v_max_i32_e32 {{v[0-9]+}}, [[SRC0]], [[NEG0]]
; GCN-DAG: v_max_i32_e32 {{v[0-9]+}}, [[SRC1]], [[NEG1]]
; GCN-DAG: v_max_i32_e32 {{v[0-9]+}}, [[SRC2]], [[NEG2]]
; GCN-DAG: v_max_i32_e32 {{v[0-9]+}}, [[SRC3]], [[NEG3]]
; GCN: v_add_{{[iu]}}32
; GCN: v_add_{{[iu]}}32
; GCN: v_add_{{[iu]}}32
; GCN: v_add_{{[iu]}}32
; SIVI: v_add_{{i|u}}32_e32 v{{[0-9]+}}, vcc,
; SIVI: v_add_{{i|u}}32_e32 v{{[0-9]+}}, vcc,
; SIVI: v_add_{{i|u}}32_e32 v{{[0-9]+}}, vcc,
; SIVI: v_add_{{i|u}}32_e32 v{{[0-9]+}}, vcc,
; GFX9: v_add_u32_e32 v{{[0-9]+}}, 2,
; GFX9: v_add_u32_e32 v{{[0-9]+}}, 2,
; GFX9: v_add_u32_e32 v{{[0-9]+}}, 2,
; GFX9: v_add_u32_e32 v{{[0-9]+}}, 2,
; EG: MAX_INT
; EG: MAX_INT
@ -181,8 +204,8 @@ define amdgpu_kernel void @s_min_max_i32(i32 addrspace(1)* %out0, i32 addrspace(
}
; FUNC-LABEL: {{^}}v_min_max_i32:
; GCN: {{buffer|flat}}_load_dword [[VAL0:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[VAL1:v[0-9]+]]
; GCN: {{buffer|flat|global}}_load_dword [[VAL0:v[0-9]+]]
; GCN: {{buffer|flat|global}}_load_dword [[VAL1:v[0-9]+]]
; GCN-DAG: v_min_i32_e32 v{{[0-9]+}}, [[VAL0]], [[VAL1]]
; GCN-DAG: v_max_i32_e32 v{{[0-9]+}}, [[VAL0]], [[VAL1]]


@ -194,7 +194,11 @@ main_body:
; GCN-LABEL: {{^}}smrd_vgpr_offset_imm:
; GCN-NEXT: BB#
; GCN-NEXT: buffer_load_dword v{{[0-9]}}, v0, s[0:3], 0 offen offset:4095 ;
; SICIVI-NEXT: buffer_load_dword v{{[0-9]}}, v0, s[0:3], 0 offen offset:4095 ;
; GFX9-NEXT: v_add_u32_e32 [[ADD:v[0-9]+]], 0xfff, v0
; GFX9-NEXT: buffer_load_dword v{{[0-9]}}, [[ADD]], s[0:3], 0 offen ;
define amdgpu_ps float @smrd_vgpr_offset_imm(<4 x i32> inreg %desc, i32 %offset) #0 {
main_body:
%off = add i32 %offset, 4095
@ -204,7 +208,7 @@ main_body:
; GCN-LABEL: {{^}}smrd_vgpr_offset_imm_too_large:
; GCN-NEXT: BB#
; GCN-NEXT: v_add{{(_co)?}}_{{i|u}}32_e32 v0, vcc, 0x1000, v0
; GCN-NEXT: v_add_{{i|u}}32_e32 v0, {{(vcc, )?}}0x1000, v0
; GCN-NEXT: buffer_load_dword v{{[0-9]}}, v0, s[0:3], 0 offen ;
define amdgpu_ps float @smrd_vgpr_offset_imm_too_large(<4 x i32> inreg %desc, i32 %offset) #0 {
main_body:
@ -240,8 +244,16 @@ main_body:
; GCN-LABEL: {{^}}smrd_vgpr_merged:
; GCN-NEXT: BB#
; GCN-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:4
; GCN-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:28
; SICIVI-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:4
; SICIVI-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:28
; GFX9: buffer_load_dword
; GFX9: buffer_load_dword
; GFX9: buffer_load_dword
; GFX9: buffer_load_dword
; GFX9: buffer_load_dword
; GFX9: buffer_load_dword
define amdgpu_ps void @smrd_vgpr_merged(<4 x i32> inreg %desc, i32 %a) #0 {
main_body:
%a1 = add i32 %a, 4


@ -1,5 +1,6 @@
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tahiti -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tahiti -verify-machineinstrs< %s | FileCheck -check-prefixes=GCN,SI,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefixes=GCN,VI,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx900 -verify-machineinstrs< %s | FileCheck -check-prefixes=GCN,GFX9,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=cypress -verify-machineinstrs< %s
declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
@ -39,8 +40,8 @@ define amdgpu_kernel void @v_ssubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)*
}
; FUNC-LABEL: {{^}}s_ssubo_i64:
; SI: s_sub_u32
; SI: s_subb_u32
; GCN: s_sub_u32
; GCN: s_subb_u32
define amdgpu_kernel void @s_ssubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 %a, i64 %b) nounwind {
%ssub = call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 %a, i64 %b) nounwind
%val = extractvalue { i64, i1 } %ssub, 0
@ -51,8 +52,14 @@ define amdgpu_kernel void @s_ssubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)*
}
; FUNC-LABEL: {{^}}v_ssubo_i64:
; SI: v_sub_{{[iu]}}32_e32
; SI: v_subb_u32_e32
; SI: v_sub_i32_e32 v{{[0-9]+}}, vcc,
; SI: v_subb_u32_e32 v{{[0-9]+}}, vcc,
; VI: v_sub_u32_e32 v{{[0-9]+}}, vcc,
; VI: v_subb_u32_e32 v{{[0-9]+}}, vcc,
; GFX9: v_sub_co_u32_e32 v{{[0-9]+}}, vcc,
; GFX9: v_subb_co_u32_e32 v{{[0-9]+}}, vcc,
define amdgpu_kernel void @v_ssubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)* %carryout, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
%a = load i64, i64 addrspace(1)* %aptr, align 4
%b = load i64, i64 addrspace(1)* %bptr, align 4


@ -1,13 +1,34 @@
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI,GFX89,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9,GFX89,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=EG,FUNC %s
declare i32 @llvm.r600.read.tidig.x() readnone
; FUNC-LABEL: {{^}}s_sub_i32:
; GCN: s_load_dword [[A:s[0-9]+]]
; GCN: s_load_dword [[B:s[0-9]+]]
; GCN: s_sub_i32 s{{[0-9]+}}, [[A]], [[B]]
define amdgpu_kernel void @s_sub_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
%result = sub i32 %a, %b
store i32 %result, i32 addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}s_sub_imm_i32:
; GCN: s_load_dword [[A:s[0-9]+]]
; GCN: s_sub_i32 s{{[0-9]+}}, 0x4d2, [[A]]
define amdgpu_kernel void @s_sub_imm_i32(i32 addrspace(1)* %out, i32 %a) {
%result = sub i32 1234, %a
store i32 %result, i32 addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}test_sub_i32:
; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; SI: v_subrev_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
; GFX9: v_sub_u32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define amdgpu_kernel void @test_sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load i32, i32 addrspace(1)* %in
@ -17,6 +38,17 @@ define amdgpu_kernel void @test_sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)
ret void
}
; FUNC-LABEL: {{^}}test_sub_imm_i32:
; EG: SUB_INT
; SI: v_sub_i32_e32 v{{[0-9]+}}, vcc, 0x7b, v{{[0-9]+}}
; GFX9: v_sub_u32_e32 v{{[0-9]+}}, 0x7b, v{{[0-9]+}}
define amdgpu_kernel void @test_sub_imm_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
%a = load i32, i32 addrspace(1)* %in
%result = sub i32 123, %a
store i32 %result, i32 addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}test_sub_v2i32:
; EG: SUB_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
@ -25,6 +57,8 @@ define amdgpu_kernel void @test_sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)
; SI: v_sub_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
; SI: v_sub_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
; GFX9: v_sub_u32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; GFX9: v_sub_u32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define amdgpu_kernel void @test_sub_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
%a = load <2 x i32>, <2 x i32> addrspace(1) * %in
@ -45,6 +79,10 @@ define amdgpu_kernel void @test_sub_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32
; SI: v_sub_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
; SI: v_sub_i32_e32 v{{[0-9]+, vcc, v[0-9]+, v[0-9]+}}
; GFX9: v_sub_u32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; GFX9: v_sub_u32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; GFX9: v_sub_u32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; GFX9: v_sub_u32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define amdgpu_kernel void @test_sub_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%a = load <4 x i32>, <4 x i32> addrspace(1) * %in
@ -54,49 +92,58 @@ define amdgpu_kernel void @test_sub_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32
ret void
}
; VI: v_sub_i16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; FUNC-LABEL: {{^}}test_sub_i16:
; SI: v_subrev_i32_e32 v{{[0-9]+}}, vcc,
; GFX89: v_sub_u16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define amdgpu_kernel void @test_sub_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
%b_ptr = getelementptr i16, i16 addrspace(1)* %in, i16 1
%a = load i16, i16 addrspace(1)* %in
%b = load i16, i16 addrspace(1)* %b_ptr
%result = sub i16 %a, %b
store i16 %result, i16 addrspace(1)* %out
ret void
%tid = call i32 @llvm.r600.read.tidig.x()
%gep = getelementptr i16, i16 addrspace(1)* %in, i32 %tid
%b_ptr = getelementptr i16, i16 addrspace(1)* %gep, i32 1
%a = load volatile i16, i16 addrspace(1)* %gep
%b = load volatile i16, i16 addrspace(1)* %b_ptr
%result = sub i16 %a, %b
store i16 %result, i16 addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}test_sub_v2i16:
; VI: v_sub_u16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; VI: v_sub_u16_sdwa v{{[0-9]+, v[0-9]+, v[0-9]+}}
; VI: v_sub_i16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; VI: v_sub_i16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; GFX9: v_pk_sub_i16
define amdgpu_kernel void @test_sub_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
%b_ptr = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %in, i16 1
%a = load <2 x i16>, <2 x i16> addrspace(1) * %in
%b = load <2 x i16>, <2 x i16> addrspace(1) * %b_ptr
%result = sub <2 x i16> %a, %b
store <2 x i16> %result, <2 x i16> addrspace(1)* %out
ret void
%tid = call i32 @llvm.r600.read.tidig.x()
%gep = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %in, i32 %tid
%b_ptr = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %gep, i16 1
%a = load <2 x i16>, <2 x i16> addrspace(1)* %gep
%b = load <2 x i16>, <2 x i16> addrspace(1)* %b_ptr
%result = sub <2 x i16> %a, %b
store <2 x i16> %result, <2 x i16> addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}test_sub_v4i16:
; VI: v_sub_u16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; VI: v_sub_u16_sdwa v{{[0-9]+, v[0-9]+, v[0-9]+}}
; VI: v_sub_u16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; VI: v_sub_u16_sdwa v{{[0-9]+, v[0-9]+, v[0-9]+}}
; VI: v_sub_i16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; VI: v_sub_i16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; VI: v_sub_i16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; VI: v_sub_i16_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; GFX9: v_pk_sub_i16
; GFX9: v_pk_sub_i16
define amdgpu_kernel void @test_sub_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
%b_ptr = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %in, i16 1
%a = load <4 x i16>, <4 x i16> addrspace(1) * %in
%b = load <4 x i16>, <4 x i16> addrspace(1) * %b_ptr
%result = sub <4 x i16> %a, %b
store <4 x i16> %result, <4 x i16> addrspace(1)* %out
ret void
%tid = call i32 @llvm.r600.read.tidig.x()
%gep = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %in, i32 %tid
%b_ptr = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %gep, i16 1
%a = load <4 x i16>, <4 x i16> addrspace(1) * %gep
%b = load <4 x i16>, <4 x i16> addrspace(1) * %b_ptr
%result = sub <4 x i16> %a, %b
store <4 x i16> %result, <4 x i16> addrspace(1)* %out
ret void
}
; FUNC-LABEL: {{^}}s_sub_i64:
; SI: s_sub_u32
; SI: s_subb_u32
; GCN: s_sub_u32
; GCN: s_subb_u32
; EG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.XY
; EG-DAG: SUB_INT {{[* ]*}}
@ -113,6 +160,12 @@ define amdgpu_kernel void @s_sub_i64(i64 addrspace(1)* noalias %out, i64 %a, i64
; SI: v_sub_i32_e32
; SI: v_subb_u32_e32
; VI: v_sub_u32_e32
; VI: v_subb_u32_e32
; GFX9: v_sub_co_u32_e32
; GFX9: v_subb_co_u32_e32
; EG: MEM_RAT_CACHELESS STORE_RAW T{{[0-9]+}}.XY
; EG-DAG: SUB_INT {{[* ]*}}
; EG-DAG: SUBB_UINT
@ -130,10 +183,20 @@ define amdgpu_kernel void @v_sub_i64(i64 addrspace(1)* noalias %out, i64 addrspa
}
; FUNC-LABEL: {{^}}v_test_sub_v2i64:
; SI: v_sub_i32_e32
; SI: v_subb_u32_e32
; SI: v_sub_i32_e32
; SI: v_subb_u32_e32
; SI: v_sub_i32_e32 v{{[0-9]+}}, vcc,
; SI: v_subb_u32_e32 v{{[0-9]+}}, vcc,
; SI: v_sub_i32_e32 v{{[0-9]+}}, vcc,
; SI: v_subb_u32_e32 v{{[0-9]+}}, vcc,
; VI: v_sub_u32_e32 v{{[0-9]+}}, vcc,
; VI: v_subb_u32_e32 v{{[0-9]+}}, vcc,
; VI: v_sub_u32_e32 v{{[0-9]+}}, vcc,
; VI: v_subb_u32_e32 v{{[0-9]+}}, vcc,
; GFX9: v_sub_co_u32_e32 v{{[0-9]+}}, vcc,
; GFX9: v_subb_co_u32_e32 v{{[0-9]+}}, vcc,
; GFX9: v_sub_co_u32_e32 v{{[0-9]+}}, vcc,
; GFX9: v_subb_co_u32_e32 v{{[0-9]+}}, vcc,
define amdgpu_kernel void @v_test_sub_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* noalias %inA, <2 x i64> addrspace(1)* noalias %inB) {
%tid = call i32 @llvm.r600.read.tidig.x() readnone
%a_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %inA, i32 %tid
@ -146,14 +209,32 @@ define amdgpu_kernel void @v_test_sub_v2i64(<2 x i64> addrspace(1)* %out, <2 x i
}
; FUNC-LABEL: {{^}}v_test_sub_v4i64:
; SI: v_sub_i32_e32
; SI: v_subb_u32_e32
; SI: v_sub_i32_e32
; SI: v_subb_u32_e32
; SI: v_sub_i32_e32
; SI: v_subb_u32_e32
; SI: v_sub_i32_e32
; SI: v_subb_u32_e32
; SI: v_sub_i32_e32 v{{[0-9]+}}, vcc,
; SI: v_subb_u32_e32 v{{[0-9]+}}, vcc,
; SI: v_sub_i32_e32 v{{[0-9]+}}, vcc,
; SI: v_subb_u32_e32 v{{[0-9]+}}, vcc,
; SI: v_sub_i32_e32 v{{[0-9]+}}, vcc,
; SI: v_subb_u32_e32 v{{[0-9]+}}, vcc,
; SI: v_sub_i32_e32 v{{[0-9]+}}, vcc,
; SI: v_subb_u32_e32 v{{[0-9]+}}, vcc,
; VI: v_sub_u32_e32 v{{[0-9]+}}, vcc,
; VI: v_subb_u32_e32 v{{[0-9]+}}, vcc,
; VI: v_sub_u32_e32 v{{[0-9]+}}, vcc,
; VI: v_subb_u32_e32 v{{[0-9]+}}, vcc,
; VI: v_sub_u32_e32 v{{[0-9]+}}, vcc,
; VI: v_subb_u32_e32 v{{[0-9]+}}, vcc,
; VI: v_sub_u32_e32 v{{[0-9]+}}, vcc,
; VI: v_subb_u32_e32 v{{[0-9]+}}, vcc,
; GFX9: v_sub_co_u32_e32 v{{[0-9]+}}, vcc,
; GFX9: v_subb_co_u32_e32 v{{[0-9]+}}, vcc,
; GFX9: v_sub_co_u32_e32 v{{[0-9]+}}, vcc,
; GFX9: v_subb_co_u32_e32 v{{[0-9]+}}, vcc,
; GFX9: v_sub_co_u32_e32 v{{[0-9]+}}, vcc,
; GFX9: v_subb_co_u32_e32 v{{[0-9]+}}, vcc,
; GFX9: v_sub_co_u32_e32 v{{[0-9]+}}, vcc,
; GFX9: v_subb_co_u32_e32 v{{[0-9]+}}, vcc,
define amdgpu_kernel void @v_test_sub_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* noalias %inA, <4 x i64> addrspace(1)* noalias %inB) {
%tid = call i32 @llvm.r600.read.tidig.x() readnone
%a_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %inA, i32 %tid


@ -1,6 +1,7 @@
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefixes=EG,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefixes=EG,FUNC %s
; FUNC-LABEL: {{^}}s_uaddo_i64_zext:
; GCN: s_add_u32
@ -22,7 +23,10 @@ define amdgpu_kernel void @s_uaddo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64
; FIXME: Could do scalar
; FUNC-LABEL: {{^}}s_uaddo_i32:
; GCN: v_add_{{[iu]}}32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}
; SI: v_add_i32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}
; VI: v_add_u32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}
; GFX9: v_add_co_u32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
; EG: ADDC_UINT
@ -37,7 +41,10 @@ define amdgpu_kernel void @s_uaddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)*
}
; FUNC-LABEL: {{^}}v_uaddo_i32:
; GCN: v_add_{{[iu]}}32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; SI: v_add_i32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_add_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9: v_add_co_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
; EG: ADDC_UINT
@ -58,7 +65,10 @@ define amdgpu_kernel void @v_uaddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)*
}
; FUNC-LABEL: {{^}}v_uaddo_i32_novcc:
; GCN: v_add_{{[iu]}}32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; SI: v_add_i32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_add_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9: v_add_co_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
; EG: ADDC_UINT
@ -95,8 +105,14 @@ define amdgpu_kernel void @s_uaddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)*
}
; FUNC-LABEL: {{^}}v_uaddo_i64:
; GCN: v_add_{{[iu]}}32
; GCN: v_addc_u32
; SI: v_add_i32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; SI: v_addc_u32_e32 v{{[0-9]+}}, vcc,
; VI: v_add_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_addc_u32_e32 v{{[0-9]+}}, vcc,
; GFX9: v_add_co_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9: v_addc_co_u32_e32 v{{[0-9]+}}, vcc,
; EG: ADDC_UINT
; EG: ADD_INT
@ -118,6 +134,9 @@ define amdgpu_kernel void @v_uaddo_i64(i64 addrspace(1)* %out, i1 addrspace(1)*
; FUNC-LABEL: {{^}}v_uaddo_i16:
; VI: v_add_u16_e32
; VI: v_cmp_lt_u16_e32
; GFX9: v_add_u16_e32
; GFX9: v_cmp_lt_u16_e32
define amdgpu_kernel void @v_uaddo_i16(i16 addrspace(1)* %out, i1 addrspace(1)* %carryout, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64


@ -1,5 +1,6 @@
;RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck --check-prefix=SI --check-prefix=GCN --check-prefix=FUNC %s
;RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck --check-prefix=VI --check-prefix=GCN --check-prefix=FUNC %s
;RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck --check-prefix=VI --check-prefix=GCN --check-prefix=FUNC %s
;RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck --check-prefix=EG --check-prefix=FUNC %s
;FUNC-LABEL: {{^}}test_udiv:


@ -1,6 +1,7 @@
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefixes=EG,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI,SICIVI,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI,SICIVI,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefixes=EG,FUNC %s
; FUNC-LABEL: {{^}}s_usubo_i64_zext:
; GCN: s_sub_u32
@ -22,7 +23,10 @@ define amdgpu_kernel void @s_usubo_i64_zext(i64 addrspace(1)* %out, i64 %a, i64
; FIXME: Could do scalar
; FUNC-LABEL: {{^}}s_usubo_i32:
; GCN: v_sub_{{[iu]}}32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}
; SI: v_sub_i32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}
; VI: v_sub_u32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}
; GFX9: v_sub_co_u32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
; EG-DAG: SUBB_UINT
@ -37,7 +41,10 @@ define amdgpu_kernel void @s_usubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)*
}
; FUNC-LABEL: {{^}}v_usubo_i32:
; GCN: v_sub_{{[iu]}}32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; SI: v_sub_i32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_sub_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9: v_sub_co_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
; EG-DAG: SUBB_UINT
@ -58,7 +65,10 @@ define amdgpu_kernel void @v_usubo_i32(i32 addrspace(1)* %out, i1 addrspace(1)*
}
; FUNC-LABEL: {{^}}v_usubo_i32_novcc:
; GCN: v_sub_{{[iu]}}32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; SI: v_sub_i32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_sub_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9: v_sub_co_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
; EG-DAG: SUBB_UINT
@ -97,8 +107,13 @@ define amdgpu_kernel void @s_usubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)*
}
; FUNC-LABEL: {{^}}v_usubo_i64:
; GCN: v_sub_{{[iu]}}32
; GCN: v_subb_u32
; SI: v_sub_i32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; SI: v_subb_u32
; VI: v_sub_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_subb_u32
; GFX9: v_sub_co_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9: v_subb_co_u32
; EG-DAG: SUBB_UINT
; EG-DAG: SUB_INT
@ -120,8 +135,15 @@ define amdgpu_kernel void @v_usubo_i64(i64 addrspace(1)* %out, i1 addrspace(1)*
}
; FUNC-LABEL: {{^}}v_usubo_i16:
; SI: v_subrev_i32_e32
; SI: v_and_b32
; SI: v_cmp_ne_u32_e32
; VI: v_sub_u16_e32
; VI: v_cmp_gt_u16_e32
; GFX9: v_sub_u16_e32
; GFX9: v_cmp_gt_u16_e32
define amdgpu_kernel void @v_usubo_i16(i16 addrspace(1)* %out, i1 addrspace(1)* %carryout, i16 addrspace(1)* %a.ptr, i16 addrspace(1)* %b.ptr) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64