AMDGPU: Handle sub of constant for DS offset folding

sub C, x -> add (sub 0, x), C for DS offsets.

This is mostly to fix regressions that show up when
SeparateConstOffsetFromGEP is enabled.
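
For example (register numbers illustrative; the pattern is the one checked by
the write_ds_sub0_offset0_global test added below), a DS address of the form
(0 - 4*x) + 12 now selects to:

  v_lshlrev_b32_e32 v0, 2, v0    ; scale the index: 4*x
  v_sub_i32_e32 v0, vcc, 0, v0   ; base = 0 - 4*x
  ds_write_b32 v0, v1 offset:12  ; constant 12 folded into the offset field

rather than being materialized into the address with an extra VALU add.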

llvm-svn: 247054
Commit 966a94f861 (parent 640499b1fa)
Matt Arsenault, 2015-09-08 19:34:22 +00:00
2 changed files with 186 additions and 10 deletions

@@ -893,15 +893,39 @@ bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
      Offset = N1;
      return true;
    }
  } else if (Addr.getOpcode() == ISD::SUB) {
    // sub C, x -> add (sub 0, x), C
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
      int64_t ByteOffset = C->getSExtValue();
      if (isUInt<16>(ByteOffset)) {
        SDLoc DL(Addr);
        SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);

        // XXX - This is kind of hacky. Create a dummy sub node so we can check
        // the known bits in isDSOffsetLegal. We need to emit the selected node
        // here, so this is thrown away.
        SDValue Sub = CurDAG->getNode(ISD::SUB, DL, MVT::i32,
                                      Zero, Addr.getOperand(1));

        if (isDSOffsetLegal(Sub, ByteOffset, 16)) {
          MachineSDNode *MachineSub
            = CurDAG->getMachineNode(AMDGPU::V_SUB_I32_e32, DL, MVT::i32,
                                     Zero, Addr.getOperand(1));

          Base = SDValue(MachineSub, 0);
          Offset = Addr.getOperand(0);
          return true;
        }
      }
    }
  } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    // If we have a constant address, prefer to put the constant into the
    // offset. This can save moves to load the constant address since multiple
    // operations can share the zero base address register, and enables merging
    // into read2 / write2 instructions.
    SDLoc DL(Addr);

    if (isUInt<16>(CAddr->getZExtValue())) {
      SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
      MachineSDNode *MovZero = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
@@ -914,10 +938,11 @@ bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
  // default case
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i16);
  return true;
}

// TODO: If offset is too big, put low 16-bit into offset.
bool AMDGPUDAGToDAGISel::SelectDS64Bit4ByteAligned(SDValue Addr, SDValue &Base,
                                                   SDValue &Offset0,
                                                   SDValue &Offset1) const {
@@ -936,9 +961,35 @@ bool AMDGPUDAGToDAGISel::SelectDS64Bit4ByteAligned(SDValue Addr, SDValue &Base,
      Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);
      return true;
    }
  } else if (Addr.getOpcode() == ISD::SUB) {
    // sub C, x -> add (sub 0, x), C
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
      unsigned DWordOffset0 = C->getZExtValue() / 4;
      unsigned DWordOffset1 = DWordOffset0 + 1;

      if (isUInt<8>(DWordOffset0)) {
        SDLoc DL(Addr);
        SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);

        // XXX - This is kind of hacky. Create a dummy sub node so we can check
        // the known bits in isDSOffsetLegal. We need to emit the selected node
        // here, so this is thrown away.
        SDValue Sub = CurDAG->getNode(ISD::SUB, DL, MVT::i32,
                                      Zero, Addr.getOperand(1));

        if (isDSOffsetLegal(Sub, DWordOffset1, 8)) {
          MachineSDNode *MachineSub
            = CurDAG->getMachineNode(AMDGPU::V_SUB_I32_e32, DL, MVT::i32,
                                     Zero, Addr.getOperand(1));

          Base = SDValue(MachineSub, 0);
          Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);
          Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);
          return true;
        }
      }
    }
  } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    unsigned DWordOffset0 = CAddr->getZExtValue() / 4;
    unsigned DWordOffset1 = DWordOffset0 + 1;
    assert(4 * DWordOffset0 == CAddr->getZExtValue());
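
A note on the XXX comments above: the dummy ISD::SUB node is never emitted; it
exists only so isDSOffsetLegal has a real SDValue on which to run known-bits
analysis before the machine node is created. The definition of isDSOffsetLegal
is not part of this diff; a minimal sketch of the shape of that check
(assuming the usual field-width test plus a sign-bit query on the base, under
a hypothetical standalone name) is:

  // Sketch only: approximates the contract of AMDGPUDAGToDAGISel::isDSOffsetLegal.
  // Assumes llvm/Support/MathExtras.h (isUInt) and SelectionDAG are available.
  static bool isDSOffsetLegalSketch(SelectionDAG *DAG, SDValue Base,
                                    unsigned Offset, unsigned OffsetBits) {
    // The immediate must fit the instruction's offset field: 16 bits for
    // ds_read/ds_write, 8 bits per slot for ds_read2/ds_write2.
    if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
        (OffsetBits == 8 && !isUInt<8>(Offset)))
      return false;

    // Assumed rule: base + offset must not wrap the DS address space, so the
    // base must be provably non-negative. This known-bits query is why the
    // caller builds the throwaway (sub 0, x) node before selecting anything.
    return DAG->SignBitIsZero(Base);
  }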

@@ -0,0 +1,125 @@
; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI %s

declare void @llvm.AMDGPU.barrier.local() #2
declare i32 @llvm.r600.read.tidig.x() #0

@lds.obj = addrspace(3) global [256 x i32] undef, align 4

; GCN-LABEL: {{^}}write_ds_sub0_offset0_global:
; GCN: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 2, v0
; GCN: v_sub_i32_e32 [[BASEPTR:v[0-9]+]], vcc, 0, [[SHL]]
; GCN: v_mov_b32_e32 [[VAL:v[0-9]+]], 0x7b
; GCN: ds_write_b32 [[BASEPTR]], [[VAL]] offset:12
define void @write_ds_sub0_offset0_global() #0 {
entry:
%x.i = call i32 @llvm.r600.read.tidig.x() #1
%sub1 = sub i32 0, %x.i
%tmp0 = getelementptr [256 x i32], [256 x i32] addrspace(3)* @lds.obj, i32 0, i32 %sub1
%arrayidx = getelementptr inbounds i32, i32 addrspace(3)* %tmp0, i32 3
store i32 123, i32 addrspace(3)* %arrayidx
ret void
}
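
; Offset math for the test above: %sub1 = 0 - %x.i indexes i32 elements, so
; the byte address is (0 - 4*%x.i) + 4*3. Selection materializes the variable
; part with v_sub_i32 and folds the constant +12 into the DS offset field.
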
; GCN-LABEL: {{^}}add_x_shl_neg_to_sub_max_offset:
; GCN-DAG: v_lshlrev_b32_e32 [[SCALED:v[0-9]+]], 2, v0
; GCN-DAG: v_sub_i32_e32 [[NEG:v[0-9]+]], vcc, 0, [[SCALED]]
; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 13
; GCN: ds_write_b8 [[NEG]], [[K]] offset:65535
define void @add_x_shl_neg_to_sub_max_offset() #1 {
%x.i = call i32 @llvm.r600.read.tidig.x() #0
%neg = sub i32 0, %x.i
%shl = shl i32 %neg, 2
%add = add i32 65535, %shl
%ptr = inttoptr i32 %add to i8 addrspace(3)*
store i8 13, i8 addrspace(3)* %ptr
ret void
}

; GCN-LABEL: {{^}}add_x_shl_neg_to_sub_max_offset_p1:
; GCN-DAG: v_lshlrev_b32_e32 [[SCALED:v[0-9]+]], 2, v0
; GCN-DAG: v_sub_i32_e32 [[NEG:v[0-9]+]], vcc, 0x10000, [[SCALED]]
; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 13
; GCN: ds_write_b8 [[NEG]], [[K]]{{$}}
define void @add_x_shl_neg_to_sub_max_offset_p1() #1 {
%x.i = call i32 @llvm.r600.read.tidig.x() #0
%neg = sub i32 0, %x.i
%shl = shl i32 %neg, 2
%add = add i32 65536, %shl
%ptr = inttoptr i32 %add to i8 addrspace(3)*
store i8 13, i8 addrspace(3)* %ptr
ret void
}
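
; Why the _p1 variant above cannot fold: the ds_write offset field is 16 bits,
; so 65535 is the largest foldable byte offset (the isUInt<16> check in the
; selector). 65536 instead stays in the v_sub_i32 as the 0x10000 literal.
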
; GCN-LABEL: {{^}}add_x_shl_neg_to_sub_multi_use:
; GCN-DAG: v_lshlrev_b32_e32 [[SCALED:v[0-9]+]], 2, v0
; GCN-DAG: v_sub_i32_e32 [[NEG:v[0-9]+]], vcc, 0, [[SCALED]]
; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 13
; GCN-NOT: v_sub
; GCN: ds_write_b32 [[NEG]], [[K]] offset:123{{$}}
; GCN-NOT: v_sub
; GCN: ds_write_b32 [[NEG]], [[K]] offset:456{{$}}
; GCN: s_endpgm
define void @add_x_shl_neg_to_sub_multi_use() #1 {
%x.i = call i32 @llvm.r600.read.tidig.x() #0
%neg = sub i32 0, %x.i
%shl = shl i32 %neg, 2
%add0 = add i32 123, %shl
%add1 = add i32 456, %shl
%ptr0 = inttoptr i32 %add0 to i32 addrspace(3)*
store volatile i32 13, i32 addrspace(3)* %ptr0
%ptr1 = inttoptr i32 %add1 to i32 addrspace(3)*
store volatile i32 13, i32 addrspace(3)* %ptr1
ret void
}

; GCN-LABEL: {{^}}add_x_shl_neg_to_sub_multi_use_same_offset:
; GCN-DAG: v_lshlrev_b32_e32 [[SCALED:v[0-9]+]], 2, v0
; GCN-DAG: v_sub_i32_e32 [[NEG:v[0-9]+]], vcc, 0, [[SCALED]]
; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 13
; GCN-NOT: v_sub
; GCN: ds_write_b32 [[NEG]], [[K]] offset:123{{$}}
; GCN-NOT: v_sub
; GCN: ds_write_b32 [[NEG]], [[K]] offset:123{{$}}
; GCN: s_endpgm
define void @add_x_shl_neg_to_sub_multi_use_same_offset() #1 {
%x.i = call i32 @llvm.r600.read.tidig.x() #0
%neg = sub i32 0, %x.i
%shl = shl i32 %neg, 2
%add = add i32 123, %shl
%ptr = inttoptr i32 %add to i32 addrspace(3)*
store volatile i32 13, i32 addrspace(3)* %ptr
store volatile i32 13, i32 addrspace(3)* %ptr
ret void
}

; GCN-LABEL: {{^}}add_x_shl_neg_to_sub_misaligned_i64_max_offset:
; GCN-DAG: v_lshlrev_b32_e32 [[SCALED:v[0-9]+]], 2, v0
; GCN-DAG: v_sub_i32_e32 [[NEG:v[0-9]+]], vcc, 0, [[SCALED]]
; GCN: ds_write2_b32 [[NEG]], {{v[0-9]+}}, {{v[0-9]+}} offset0:254 offset1:255
define void @add_x_shl_neg_to_sub_misaligned_i64_max_offset() #1 {
%x.i = call i32 @llvm.r600.read.tidig.x() #0
%neg = sub i32 0, %x.i
%shl = shl i32 %neg, 2
%add = add i32 1019, %shl
%ptr = inttoptr i32 %add to i64 addrspace(3)*
store i64 123, i64 addrspace(3)* %ptr, align 4
ret void
}

; GCN-LABEL: {{^}}add_x_shl_neg_to_sub_misaligned_i64_max_offset_p1:
; GCN-DAG: v_lshlrev_b32_e32 [[SCALED:v[0-9]+]], 2, v0
; GCN-DAG: v_sub_i32_e32 [[NEG:v[0-9]+]], vcc, 0x3fc, [[SCALED]]
; GCN: ds_write2_b32 [[NEG]], {{v[0-9]+}}, {{v[0-9]+}} offset1:1{{$}}
define void @add_x_shl_neg_to_sub_misaligned_i64_max_offset_p1() #1 {
%x.i = call i32 @llvm.r600.read.tidig.x() #0
%neg = sub i32 0, %x.i
%shl = shl i32 %neg, 2
%add = add i32 1020, %shl
%ptr = inttoptr i32 %add to i64 addrspace(3)*
store i64 123, i64 addrspace(3)* %ptr, align 4
ret void
}
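
; For ds_write2_b32, offset0/offset1 are 8-bit dword offsets. 1019/4 truncates
; to offset0:254 (offset1:255), which still fits; 1020 would need offset1 =
; 256, failing the isUInt<8> check, so 0x3fc (1020) stays in the v_sub_i32.
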
attributes #0 = { nounwind readnone }
attributes #1 = { nounwind }
attributes #2 = { nounwind noduplicate convergent }