; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck %s

; CHECK-LABEL: {{^}}inline_asm:
; CHECK: s_endpgm
; CHECK: s_endpgm
define amdgpu_kernel void @inline_asm(i32 addrspace(1)* %out) {
entry:
  store i32 5, i32 addrspace(1)* %out
  call void asm sideeffect "s_endpgm", ""()
  ret void
}

; CHECK-LABEL: {{^}}inline_asm_shader:
; CHECK: s_endpgm
; CHECK: s_endpgm
define amdgpu_ps void @inline_asm_shader() {
entry:
  call void asm sideeffect "s_endpgm", ""()
  ret void
}

; CHECK: {{^}}branch_on_asm:
; Make sure inline assembly is treated as divergent.
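; Even though the asm result is tied to an "=s" (SGPR) constraint, it is
; still treated as a divergent value, so the branch on it is expected to be
; lowered with exec masking (s_and_saveexec_b64) rather than a scalar branch.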
; CHECK: s_mov_b32 s{{[0-9]+}}, 0
; CHECK: s_and_saveexec_b64
define amdgpu_kernel void @branch_on_asm(i32 addrspace(1)* %out) {
  %zero = call i32 asm "s_mov_b32 $0, 0", "=s"()
  %cmp = icmp eq i32 %zero, 0
  br i1 %cmp, label %if, label %endif

if:
  store i32 0, i32 addrspace(1)* %out
  br label %endif

endif:
  ret void
}
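
; The 64-bit mask produced by the v_cmp through the "=s" constraint is
; returned in an SGPR pair and then copied through a pair of VGPRs for the
; store, which is what the checks below expect.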
; CHECK-LABEL: {{^}}v_cmp_asm:
; CHECK: v_mov_b32_e32 [[SRC:v[0-9]+]], s{{[0-9]+}}
; CHECK: v_cmp_ne_u32_e64 s{{\[}}[[MASK_LO:[0-9]+]]:[[MASK_HI:[0-9]+]]{{\]}}, 0, [[SRC]]
; CHECK-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[MASK_LO]]
; CHECK-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[MASK_HI]]
; CHECK: buffer_store_dwordx2 v{{\[}}[[V_LO]]:[[V_HI]]{{\]}}
define amdgpu_kernel void @v_cmp_asm(i64 addrspace(1)* %out, i32 %in) {
  %sgpr = tail call i64 asm "v_cmp_ne_u32_e64 $0, 0, $1", "=s,v"(i32 %in)
  store i64 %sgpr, i64 addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}code_size_inline_asm:
; CHECK: codeLenInByte = 12
define amdgpu_kernel void @code_size_inline_asm(i32 addrspace(1)* %out) {
entry:
  call void asm sideeffect "v_nop_e64", ""()
  ret void
}

; All inlineasm instructions are assumed to be the maximum size
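; As the codeLenInByte values below suggest, each inline asm instruction is
; counted at the maximum encoded size (8 bytes) and the kernel's final
; s_endpgm adds 4 bytes, so 0 instructions -> 4, 1 -> 12, and 2 -> 20;
; asm comments and blank lines appear to contribute nothing.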
; CHECK-LABEL: {{^}}code_size_inline_asm_small_inst:
; CHECK: codeLenInByte = 12
define amdgpu_kernel void @code_size_inline_asm_small_inst(i32 addrspace(1)* %out) {
entry:
  call void asm sideeffect "v_nop_e32", ""()
  ret void
}

; CHECK-LABEL: {{^}}code_size_inline_asm_2_inst:
; CHECK: codeLenInByte = 20
define amdgpu_kernel void @code_size_inline_asm_2_inst(i32 addrspace(1)* %out) {
entry:
  call void asm sideeffect "
    v_nop_e64
    v_nop_e64
    ", ""()
  ret void
}

; CHECK-LABEL: {{^}}code_size_inline_asm_2_inst_extra_newline:
; CHECK: codeLenInByte = 20
define amdgpu_kernel void @code_size_inline_asm_2_inst_extra_newline(i32 addrspace(1)* %out) {
entry:
  call void asm sideeffect "
    v_nop_e64

    v_nop_e64
    ", ""()
  ret void
}

; CHECK-LABEL: {{^}}code_size_inline_asm_0_inst:
; CHECK: codeLenInByte = 4
define amdgpu_kernel void @code_size_inline_asm_0_inst(i32 addrspace(1)* %out) {
entry:
  call void asm sideeffect "", ""()
  ret void
}

; CHECK-LABEL: {{^}}code_size_inline_asm_1_comment:
; CHECK: codeLenInByte = 4
define amdgpu_kernel void @code_size_inline_asm_1_comment(i32 addrspace(1)* %out) {
entry:
  call void asm sideeffect "; comment", ""()
  ret void
}

; CHECK-LABEL: {{^}}code_size_inline_asm_newline_1_comment:
; CHECK: codeLenInByte = 4
define amdgpu_kernel void @code_size_inline_asm_newline_1_comment(i32 addrspace(1)* %out) {
entry:
  call void asm sideeffect "
    ; comment", ""()
  ret void
}

; CHECK-LABEL: {{^}}code_size_inline_asm_1_comment_newline:
; CHECK: codeLenInByte = 4
define amdgpu_kernel void @code_size_inline_asm_1_comment_newline(i32 addrspace(1)* %out) {
entry:
  call void asm sideeffect "; comment
    ", ""()
  ret void
}

; CHECK-LABEL: {{^}}code_size_inline_asm_2_comments_line:
; CHECK: codeLenInByte = 4
define amdgpu_kernel void @code_size_inline_asm_2_comments_line(i32 addrspace(1)* %out) {
entry:
  call void asm sideeffect "; first comment ; second comment", ""()
  ret void
}

; CHECK-LABEL: {{^}}code_size_inline_asm_2_comments_line_nospace:
; CHECK: codeLenInByte = 4
define amdgpu_kernel void @code_size_inline_asm_2_comments_line_nospace(i32 addrspace(1)* %out) {
entry:
  call void asm sideeffect "; first comment;second comment", ""()
  ret void
}

; CHECK-LABEL: {{^}}code_size_inline_asm_mixed_comments0:
; CHECK: codeLenInByte = 20
define amdgpu_kernel void @code_size_inline_asm_mixed_comments0(i32 addrspace(1)* %out) {
entry:
  call void asm sideeffect "; comment
    v_nop_e64 ; inline comment
    ; separate comment
    v_nop_e64

    ; trailing comment
    ; extra comment
    ", ""()
  ret void
}

; CHECK-LABEL: {{^}}code_size_inline_asm_mixed_comments1:
; CHECK: codeLenInByte = 20
define amdgpu_kernel void @code_size_inline_asm_mixed_comments1(i32 addrspace(1)* %out) {
entry:
  call void asm sideeffect "v_nop_e64 ; inline comment
    ; separate comment
    v_nop_e64

    ; trailing comment
    ; extra comment
    ", ""()
  ret void
}

; CHECK-LABEL: {{^}}code_size_inline_asm_mixed_comments_operands:
; CHECK: codeLenInByte = 20
define amdgpu_kernel void @code_size_inline_asm_mixed_comments_operands(i32 addrspace(1)* %out) {
entry:
  call void asm sideeffect "; comment
    v_add_i32_e32 v0, vcc, v1, v2 ; inline comment
    ; separate comment
    v_bfrev_b32_e32 v0, 1

    ; trailing comment
    ; extra comment
    ", ""()
  ret void
}

; FIXME: Should not have intermediate sgprs
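; The 64-bit immediate 123456 (0x1e240) is currently materialized into
; s[0:1] and then copied component-wise into v[0:1] instead of being moved
; straight into the VGPR pair, which is what the checks below document.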
; CHECK-LABEL: {{^}}i64_imm_input_phys_vgpr:
; CHECK-DAG: s_mov_b32 s1, 0
; CHECK-DAG: s_mov_b32 s0, 0x1e240
; CHECK: v_mov_b32_e32 v0, s0
; CHECK: v_mov_b32_e32 v1, s1
; CHECK: use v[0:1]
define amdgpu_kernel void @i64_imm_input_phys_vgpr() {
entry:
  call void asm sideeffect "; use $0 ", "{v[0:1]}"(i64 123456)
  ret void
}
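
; An i1 immediate bound to a physical VGPR is expanded into a full 0/-1
; value: the checks below expect an all-ones scalar mask (s_mov_b64 -1)
; selected into v0 with v_cndmask before the asm reads it.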
; CHECK-LABEL: {{^}}i1_imm_input_phys_vgpr:
; CHECK: s_mov_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], -1
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, [[MASK]]
; CHECK: ; use v0
define amdgpu_kernel void @i1_imm_input_phys_vgpr() {
entry:
  call void asm sideeffect "; use $0 ", "{v0}"(i1 true)
  ret void
}
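
; For an i1 loaded from memory and passed in a physical VGPR, the loaded
; byte is expected to be masked to a single bit and converted to a 0/-1
; value in v0, while the i1 defined by the asm in v1 is read back with a
; compare against zero before being stored.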
; CHECK-LABEL: {{^}}i1_input_phys_vgpr:
; CHECK: {{buffer|flat}}_load_ubyte [[LOAD:v[0-9]+]]
; CHECK: v_and_b32_e32 [[LOAD]], 1, [[LOAD]]
; CHECK-NEXT: v_cmp_eq_u32_e32 vcc, 1, [[LOAD]]
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; CHECK: ; use v0
; CHECK: v_cmp_ne_u32_e32 vcc, 0, v1
; CHECK: v_cndmask_b32_e64 [[STORE:v[0-9]+]], 0, 1, vcc
; CHECK: {{buffer|flat}}_store_byte [[STORE]],
define amdgpu_kernel void @i1_input_phys_vgpr() {
entry:
  %val = load i1, i1 addrspace(1)* undef
  %cc = call i1 asm sideeffect "; use $1, def $0 ", "={v1}, {v0}"(i1 %val)
  store i1 %cc, i1 addrspace(1)* undef
  ret void
}

; FIXME: Should be scheduled to shrink vcc
; CHECK-LABEL: {{^}}i1_input_phys_vgpr_x2:
; CHECK: v_cmp_eq_u32_e32 vcc, 1, v0
; CHECK: v_cndmask_b32_e64 v0, 0, -1, vcc
; CHECK: v_cmp_eq_u32_e32 vcc, 1, v1
; CHECK: v_cndmask_b32_e64 v1, 0, -1, vcc
define amdgpu_kernel void @i1_input_phys_vgpr_x2() {
entry:
  %val0 = load volatile i1, i1 addrspace(1)* undef
  %val1 = load volatile i1, i1 addrspace(1)* undef
  call void asm sideeffect "; use $0 $1 ", "{v0}, {v1}"(i1 %val0, i1 %val1)
  ret void
}
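
; Both asm defs below are pinned to v0, so the first result is expected to
; be copied into v1 before the second def clobbers v0.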
; CHECK-LABEL: {{^}}muliple_def_phys_vgpr:
; CHECK: ; def v0
; CHECK: v_mov_b32_e32 v1, v0
; CHECK: ; def v0
; CHECK: v_lshlrev_b32_e32 v{{[0-9]+}}, v0, v1
define amdgpu_kernel void @muliple_def_phys_vgpr() {
entry:
  %def0 = call i32 asm sideeffect "; def $0 ", "={v0}"()
  %def1 = call i32 asm sideeffect "; def $0 ", "={v0}"()
  %add = shl i32 %def0, %def1
  store i32 %add, i32 addrspace(1)* undef
  ret void
}
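
; The "n" constraint requires an immediate operand; the ${0:c} modifier is
; expected to print it as a bare constant (10), and ${0:n} in the following
; test prints its negated value (-10).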
; CHECK-LABEL: {{^}}asm_constraint_c_n:
; CHECK: s_trap 10{{$}}
define amdgpu_kernel void @asm_constraint_c_n() {
entry:
  tail call void asm sideeffect "s_trap ${0:c}", "n"(i32 10) #1
  ret void
}

; CHECK-LABEL: {{^}}asm_constraint_n_n:
; CHECK: s_trap -10{{$}}
define amdgpu_kernel void @asm_constraint_n_n() {
entry:
  tail call void asm sideeffect "s_trap ${0:n}", "n"(i32 10) #1
  ret void
}

; Make sure tuples of 3 SGPRs are printed with the [] syntax instead
; of the tablegen default.
; CHECK-LABEL: {{^}}sgpr96_name_format:
; CHECK: ; sgpr96 s[0:2]
define amdgpu_kernel void @sgpr96_name_format() {
entry:
  tail call void asm sideeffect "; sgpr96 $0", "s"(<3 x i32> <i32 10, i32 11, i32 12>) #1
  ret void
}

; Check aggregate types are handled properly.
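; The asm result is a { i64, i64 } bound to mixed "=v" and "=s" output
; constraints and is only consumed in another block through the phi in
; %exit, so the output register classes must be derived correctly across
; blocks for the extractvalues and their uses.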
; CHECK-LABEL: mad_u64
; CHECK: v_mad_u64_u32
define void @mad_u64(i32 %x) {
entry:
  br i1 undef, label %exit, label %false

false:
  %s0 = tail call { i64, i64 } asm sideeffect "v_mad_u64_u32 $0, $1, $2, $3, $4", "=v,=s,v,v,v"(i32 -766435501, i32 %x, i64 0)
  br label %exit

exit:
  %s1 = phi { i64, i64 } [ undef, %entry ], [ %s0, %false ]
  %v0 = extractvalue { i64, i64 } %s1, 0
  %v1 = extractvalue { i64, i64 } %s1, 1
  tail call void asm sideeffect "; use $0", "v"(i64 %v0)
  tail call void asm sideeffect "; use $0", "v"(i64 %v1)
  ret void
}