AMDGPU: Initial, crude support for indirect calls
This isn't really usable, and requires the -amdgpu-fixed-function-abi flag to work. It assumes a uniform call target, and will hit a verifier error if the call target ends up in a VGPR. It also doesn't attempt to do anything sensible for the reported register/stack usage.
parent ea4597eef1
commit 4ea1baf6a0
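For context, a minimal sketch of what this enables (illustrative only, not part of the patch; the global name @gv.example is hypothetical, and the autogenerated test added below is the authoritative example):

; Sketch: an indirect call through a uniform function pointer kept in SGPRs,
; assuming compilation with llc -amdgpu-fixed-function-abi (a VGPR call
; target still fails the verifier, per the note above).
@gv.example = external hidden unnamed_addr addrspace(4) constant void()*, align 4

define amdgpu_kernel void @example_indirect_call() {
  %fptr = load void()*, void()* addrspace(4)* @gv.example
  ; Selected to SI_CALL_ISEL through the new (AMDGPUcall $src0, (i64 0)) pattern.
  call void %fptr()
  ret void
}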
@@ -159,13 +159,14 @@ struct AMDGPUFunctionArgInfo {
 
 class AMDGPUArgumentUsageInfo : public ImmutablePass {
 private:
-  static const AMDGPUFunctionArgInfo ExternFunctionInfo;
-  static const AMDGPUFunctionArgInfo FixedABIFunctionInfo;
   DenseMap<const Function *, AMDGPUFunctionArgInfo> ArgInfoMap;
 
 public:
   static char ID;
 
+  static const AMDGPUFunctionArgInfo ExternFunctionInfo;
+  static const AMDGPUFunctionArgInfo FixedABIFunctionInfo;
+
   AMDGPUArgumentUsageInfo() : ImmutablePass(ID) { }
 
   void getAnalysisUsage(AnalysisUsage &AU) const override {

@@ -601,6 +601,15 @@ int32_t AMDGPUAsmPrinter::SIFunctionResourceInfo::getTotalNumVGPRs(
   return std::max(NumVGPR, NumAGPR);
 }
 
+static const Function *getCalleeFunction(const MachineOperand &Op) {
+  if (Op.isImm()) {
+    assert(Op.getImm() == 0);
+    return nullptr;
+  }
+
+  return cast<Function>(Op.getGlobal());
+}
+
 AMDGPUAsmPrinter::SIFunctionResourceInfo AMDGPUAsmPrinter::analyzeResourceUsage(
   const MachineFunction &MF) const {
   SIFunctionResourceInfo Info;

@@ -853,8 +862,9 @@ AMDGPUAsmPrinter::SIFunctionResourceInfo AMDGPUAsmPrinter::analyzeResourceUsage(
 
         const MachineOperand *CalleeOp
           = TII->getNamedOperand(MI, AMDGPU::OpName::callee);
-        const Function *Callee = cast<Function>(CalleeOp->getGlobal());
-        if (Callee->isDeclaration()) {
+
+        const Function *Callee = getCalleeFunction(*CalleeOp);
+        if (!Callee || Callee->isDeclaration()) {
           // If this is a call to an external function, we can't do much. Make
           // conservative guesses.
 

@@ -897,7 +907,8 @@ AMDGPUAsmPrinter::SIFunctionResourceInfo AMDGPUAsmPrinter::analyzeResourceUsage(
           Info.HasRecursion |= I->second.HasRecursion;
         }
 
-        if (!Callee->doesNotRecurse())
+        // FIXME: Call site could have norecurse on it
+        if (!Callee || !Callee->doesNotRecurse())
           Info.HasRecursion = true;
       }
     }

@@ -2445,21 +2445,20 @@ void SITargetLowering::passSpecialInputs(
   if (!CLI.CS)
     return;
 
-  const Function *CalleeFunc = CLI.CS.getCalledFunction();
-  assert(CalleeFunc);
-
   SelectionDAG &DAG = CLI.DAG;
   const SDLoc &DL = CLI.DL;
 
   const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
 
-  auto &ArgUsageInfo =
-    DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
-  const AMDGPUFunctionArgInfo &CalleeArgInfo
-    = ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc);
-
   const AMDGPUFunctionArgInfo &CallerArgInfo = Info.getArgInfo();
 
+  const AMDGPUFunctionArgInfo *CalleeArgInfo
+    = &AMDGPUArgumentUsageInfo::FixedABIFunctionInfo;
+  if (const Function *CalleeFunc = CLI.CS.getCalledFunction()) {
+    auto &ArgUsageInfo =
+      DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
+    CalleeArgInfo = &ArgUsageInfo.lookupFuncArgInfo(*CalleeFunc);
+  }
+
   // TODO: Unify with private memory register handling. This is complicated by
   // the fact that at least in kernels, the input argument is not necessarily
   // in the same location as the input.

@@ -2477,7 +2476,7 @@ void SITargetLowering::passSpecialInputs(
     const ArgDescriptor *OutgoingArg;
     const TargetRegisterClass *ArgRC;
 
-    std::tie(OutgoingArg, ArgRC) = CalleeArgInfo.getPreloadedValue(InputID);
+    std::tie(OutgoingArg, ArgRC) = CalleeArgInfo->getPreloadedValue(InputID);
     if (!OutgoingArg)
       continue;
 

@@ -2518,13 +2517,13 @@ void SITargetLowering::passSpecialInputs(
   const TargetRegisterClass *ArgRC;
 
   std::tie(OutgoingArg, ArgRC) =
-    CalleeArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
+    CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
   if (!OutgoingArg)
     std::tie(OutgoingArg, ArgRC) =
-      CalleeArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
+      CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
   if (!OutgoingArg)
     std::tie(OutgoingArg, ArgRC) =
-      CalleeArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
+      CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
   if (!OutgoingArg)
     return;
 

@@ -2539,10 +2538,10 @@ void SITargetLowering::passSpecialInputs(
   SDLoc SL;
 
   // If incoming ids are not packed we need to pack them.
-  if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo.WorkItemIDX)
+  if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo->WorkItemIDX)
     InputReg = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgX);
 
-  if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo.WorkItemIDY) {
+  if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo->WorkItemIDY) {
    SDValue Y = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgY);
    Y = DAG.getNode(ISD::SHL, SL, MVT::i32, Y,
                    DAG.getShiftAmountConstant(10, MVT::i32, SL));

@@ -2550,7 +2549,7 @@ void SITargetLowering::passSpecialInputs(
                DAG.getNode(ISD::OR, SL, MVT::i32, InputReg, Y) : Y;
   }
 
-  if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo.WorkItemIDZ) {
+  if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo->WorkItemIDZ) {
     SDValue Z = loadInputValue(DAG, ArgRC, MVT::i32, DL, *IncomingArgZ);
     Z = DAG.getNode(ISD::SHL, SL, MVT::i32, Z,
                     DAG.getShiftAmountConstant(20, MVT::i32, SL));

@@ -2708,7 +2707,7 @@ SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
   if (!CLI.CS.getInstruction())
     report_fatal_error("unsupported libcall legalization");
 
-  if (!CLI.CS.getCalledFunction()) {
+  if (!AMDGPUTargetMachine::EnableFixedFunctionABI && !CLI.CS.getCalledFunction()) {
     return lowerUnhandledCall(CLI, InVals,
                               "unsupported indirect call to function ");
   }
 

@@ -2937,9 +2936,12 @@ SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
   Ops.push_back(Callee);
   // Add a redundant copy of the callee global which will not be legalized, as
   // we need direct access to the callee later.
-  GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Callee);
-  const GlobalValue *GV = GSD->getGlobal();
-  Ops.push_back(DAG.getTargetGlobalAddress(GV, DL, MVT::i64));
+  if (GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(Callee)) {
+    const GlobalValue *GV = GSD->getGlobal();
+    Ops.push_back(DAG.getTargetGlobalAddress(GV, DL, MVT::i64));
+  } else {
+    Ops.push_back(DAG.getTargetConstant(0, DL, MVT::i64));
+  }
 
   if (IsTailCall) {
     // Each tail call may have to adjust the stack by a different amount, so

@@ -445,8 +445,8 @@ def SI_RETURN : SPseudoInstSI <
 
 // Return for returning function calls without output register.
 //
-// This version is only needed so we can fill in the output regiter in
-// the custom inserter.
+// This version is only needed so we can fill in the output register
+// in the custom inserter.
 def SI_CALL_ISEL : SPseudoInstSI <
   (outs), (ins SSrc_b64:$src0, unknown:$callee),
   [(AMDGPUcall i64:$src0, tglobaladdr:$callee)]> {

@@ -458,6 +458,11 @@ def SI_CALL_ISEL : SPseudoInstSI <
   let isConvergent = 1;
 }
 
+def : GCNPat<
+  (AMDGPUcall i64:$src0, (i64 0)),
+  (SI_CALL_ISEL $src0, (i64 0))
+>;
+
 // Wrapper around s_swappc_b64 with extra $callee parameter to track
 // the called function after regalloc.
 def SI_CALL : SPseudoInstSI <

@@ -0,0 +1,201 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -amdgpu-fixed-function-abi -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

@gv.fptr0 = external hidden unnamed_addr addrspace(4) constant void()*, align 4
@gv.fptr1 = external hidden unnamed_addr addrspace(4) constant void(i32)*, align 4

define amdgpu_kernel void @test_indirect_call_sgpr_ptr() {
; GCN-LABEL: test_indirect_call_sgpr_ptr:
; GCN: .amd_kernel_code_t
; GCN-NEXT: amd_code_version_major = 1
; GCN-NEXT: amd_code_version_minor = 2
; GCN-NEXT: amd_machine_kind = 1
; GCN-NEXT: amd_machine_version_major = 7
; GCN-NEXT: amd_machine_version_minor = 0
; GCN-NEXT: amd_machine_version_stepping = 0
; GCN-NEXT: kernel_code_entry_byte_offset = 256
; GCN-NEXT: kernel_code_prefetch_byte_size = 0
; GCN-NEXT: granulated_workitem_vgpr_count = 7
; GCN-NEXT: granulated_wavefront_sgpr_count = 5
; GCN-NEXT: priority = 0
; GCN-NEXT: float_mode = 192
; GCN-NEXT: priv = 0
; GCN-NEXT: enable_dx10_clamp = 1
; GCN-NEXT: debug_mode = 0
; GCN-NEXT: enable_ieee_mode = 1
; GCN-NEXT: enable_wgp_mode = 0
; GCN-NEXT: enable_mem_ordered = 0
; GCN-NEXT: enable_fwd_progress = 0
; GCN-NEXT: enable_sgpr_private_segment_wave_byte_offset = 1
; GCN-NEXT: user_sgpr_count = 14
; GCN-NEXT: enable_trap_handler = 0
; GCN-NEXT: enable_sgpr_workgroup_id_x = 1
; GCN-NEXT: enable_sgpr_workgroup_id_y = 1
; GCN-NEXT: enable_sgpr_workgroup_id_z = 1
; GCN-NEXT: enable_sgpr_workgroup_info = 0
; GCN-NEXT: enable_vgpr_workitem_id = 2
; GCN-NEXT: enable_exception_msb = 0
; GCN-NEXT: granulated_lds_size = 0
; GCN-NEXT: enable_exception = 0
; GCN-NEXT: enable_sgpr_private_segment_buffer = 1
; GCN-NEXT: enable_sgpr_dispatch_ptr = 1
; GCN-NEXT: enable_sgpr_queue_ptr = 1
; GCN-NEXT: enable_sgpr_kernarg_segment_ptr = 1
; GCN-NEXT: enable_sgpr_dispatch_id = 1
; GCN-NEXT: enable_sgpr_flat_scratch_init = 1
; GCN-NEXT: enable_sgpr_private_segment_size = 0
; GCN-NEXT: enable_sgpr_grid_workgroup_count_x = 0
; GCN-NEXT: enable_sgpr_grid_workgroup_count_y = 0
; GCN-NEXT: enable_sgpr_grid_workgroup_count_z = 0
; GCN-NEXT: enable_wavefront_size32 = 0
; GCN-NEXT: enable_ordered_append_gds = 0
; GCN-NEXT: private_element_size = 1
; GCN-NEXT: is_ptr64 = 1
; GCN-NEXT: is_dynamic_callstack = 1
; GCN-NEXT: is_debug_enabled = 0
; GCN-NEXT: is_xnack_enabled = 1
; GCN-NEXT: workitem_private_segment_byte_size = 16384
; GCN-NEXT: workgroup_group_segment_byte_size = 0
; GCN-NEXT: gds_segment_byte_size = 0
; GCN-NEXT: kernarg_segment_byte_size = 0
; GCN-NEXT: workgroup_fbarrier_count = 0
; GCN-NEXT: wavefront_sgpr_count = 48
; GCN-NEXT: workitem_vgpr_count = 32
; GCN-NEXT: reserved_vgpr_first = 0
; GCN-NEXT: reserved_vgpr_count = 0
; GCN-NEXT: reserved_sgpr_first = 0
; GCN-NEXT: reserved_sgpr_count = 0
; GCN-NEXT: debug_wavefront_private_segment_offset_sgpr = 0
; GCN-NEXT: debug_private_segment_buffer_sgpr = 0
; GCN-NEXT: kernarg_segment_alignment = 4
; GCN-NEXT: group_segment_alignment = 4
; GCN-NEXT: private_segment_alignment = 4
; GCN-NEXT: wavefront_size = 6
; GCN-NEXT: call_convention = -1
; GCN-NEXT: runtime_loader_kernel_symbol = 0
; GCN-NEXT: .end_amd_kernel_code_t
; GCN-NEXT: ; %bb.0:
; GCN-NEXT: s_mov_b32 s33, s17
; GCN-NEXT: s_mov_b32 s32, s33
; GCN-NEXT: s_mov_b32 flat_scratch_lo, s13
; GCN-NEXT: s_add_u32 s12, s12, s33
; GCN-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; GCN-NEXT: s_getpc_b64 s[4:5]
; GCN-NEXT: s_add_u32 s4, s4, gv.fptr0@rel32@lo+4
; GCN-NEXT: s_addc_u32 s5, s5, gv.fptr0@rel32@hi+4
; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x0
; GCN-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GCN-NEXT: v_lshlrev_b32_e32 v1, 10, v1
; GCN-NEXT: v_or_b32_e32 v0, v0, v1
; GCN-NEXT: v_or_b32_e32 v31, v0, v2
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_swappc_b64 s[30:31], s[4:5]
; GCN-NEXT: s_endpgm
  %fptr = load void()*, void()* addrspace(4)* @gv.fptr0
  call void %fptr()
  ret void
}

define amdgpu_kernel void @test_indirect_call_sgpr_ptr_arg() {
; GCN-LABEL: test_indirect_call_sgpr_ptr_arg:
; GCN: .amd_kernel_code_t
; GCN-NEXT: amd_code_version_major = 1
; GCN-NEXT: amd_code_version_minor = 2
; GCN-NEXT: amd_machine_kind = 1
; GCN-NEXT: amd_machine_version_major = 7
; GCN-NEXT: amd_machine_version_minor = 0
; GCN-NEXT: amd_machine_version_stepping = 0
; GCN-NEXT: kernel_code_entry_byte_offset = 256
; GCN-NEXT: kernel_code_prefetch_byte_size = 0
; GCN-NEXT: granulated_workitem_vgpr_count = 7
; GCN-NEXT: granulated_wavefront_sgpr_count = 5
; GCN-NEXT: priority = 0
; GCN-NEXT: float_mode = 192
; GCN-NEXT: priv = 0
; GCN-NEXT: enable_dx10_clamp = 1
; GCN-NEXT: debug_mode = 0
; GCN-NEXT: enable_ieee_mode = 1
; GCN-NEXT: enable_wgp_mode = 0
; GCN-NEXT: enable_mem_ordered = 0
; GCN-NEXT: enable_fwd_progress = 0
; GCN-NEXT: enable_sgpr_private_segment_wave_byte_offset = 1
; GCN-NEXT: user_sgpr_count = 14
; GCN-NEXT: enable_trap_handler = 0
; GCN-NEXT: enable_sgpr_workgroup_id_x = 1
; GCN-NEXT: enable_sgpr_workgroup_id_y = 1
; GCN-NEXT: enable_sgpr_workgroup_id_z = 1
; GCN-NEXT: enable_sgpr_workgroup_info = 0
; GCN-NEXT: enable_vgpr_workitem_id = 2
; GCN-NEXT: enable_exception_msb = 0
; GCN-NEXT: granulated_lds_size = 0
; GCN-NEXT: enable_exception = 0
; GCN-NEXT: enable_sgpr_private_segment_buffer = 1
; GCN-NEXT: enable_sgpr_dispatch_ptr = 1
; GCN-NEXT: enable_sgpr_queue_ptr = 1
; GCN-NEXT: enable_sgpr_kernarg_segment_ptr = 1
; GCN-NEXT: enable_sgpr_dispatch_id = 1
; GCN-NEXT: enable_sgpr_flat_scratch_init = 1
; GCN-NEXT: enable_sgpr_private_segment_size = 0
; GCN-NEXT: enable_sgpr_grid_workgroup_count_x = 0
; GCN-NEXT: enable_sgpr_grid_workgroup_count_y = 0
; GCN-NEXT: enable_sgpr_grid_workgroup_count_z = 0
; GCN-NEXT: enable_wavefront_size32 = 0
; GCN-NEXT: enable_ordered_append_gds = 0
; GCN-NEXT: private_element_size = 1
; GCN-NEXT: is_ptr64 = 1
; GCN-NEXT: is_dynamic_callstack = 1
; GCN-NEXT: is_debug_enabled = 0
; GCN-NEXT: is_xnack_enabled = 1
; GCN-NEXT: workitem_private_segment_byte_size = 16384
; GCN-NEXT: workgroup_group_segment_byte_size = 0
; GCN-NEXT: gds_segment_byte_size = 0
; GCN-NEXT: kernarg_segment_byte_size = 0
; GCN-NEXT: workgroup_fbarrier_count = 0
; GCN-NEXT: wavefront_sgpr_count = 48
; GCN-NEXT: workitem_vgpr_count = 32
; GCN-NEXT: reserved_vgpr_first = 0
; GCN-NEXT: reserved_vgpr_count = 0
; GCN-NEXT: reserved_sgpr_first = 0
; GCN-NEXT: reserved_sgpr_count = 0
; GCN-NEXT: debug_wavefront_private_segment_offset_sgpr = 0
; GCN-NEXT: debug_private_segment_buffer_sgpr = 0
; GCN-NEXT: kernarg_segment_alignment = 4
; GCN-NEXT: group_segment_alignment = 4
; GCN-NEXT: private_segment_alignment = 4
; GCN-NEXT: wavefront_size = 6
; GCN-NEXT: call_convention = -1
; GCN-NEXT: runtime_loader_kernel_symbol = 0
; GCN-NEXT: .end_amd_kernel_code_t
; GCN-NEXT: ; %bb.0:
; GCN-NEXT: s_mov_b32 s33, s17
; GCN-NEXT: s_mov_b32 s32, s33
; GCN-NEXT: s_mov_b32 flat_scratch_lo, s13
; GCN-NEXT: s_add_u32 s12, s12, s33
; GCN-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; GCN-NEXT: s_getpc_b64 s[4:5]
; GCN-NEXT: s_add_u32 s4, s4, gv.fptr1@rel32@lo+4
; GCN-NEXT: s_addc_u32 s5, s5, gv.fptr1@rel32@hi+4
; GCN-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x0
; GCN-NEXT: v_lshlrev_b32_e32 v1, 10, v1
; GCN-NEXT: v_or_b32_e32 v0, v0, v1
; GCN-NEXT: v_or_b32_e32 v31, v0, v2
; GCN-NEXT: v_mov_b32_e32 v0, 0x7b
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_swappc_b64 s[30:31], s[4:5]
; GCN-NEXT: s_endpgm
  %fptr = load void(i32)*, void(i32)* addrspace(4)* @gv.fptr1
  call void %fptr(i32 123)
  ret void
}

; FIXME
; define void @test_indirect_call_vgpr_ptr(void()* %fptr) {
;   call void %fptr()
;   ret void
; }

; define void @test_indirect_call_vgpr_ptr_arg(void(i32)* %fptr) {
;   call void %fptr(i32 123)
;   ret void
; }