AMDGPU: Fix capitalized register names in asm constraints

This was a workaround a long time ago, but the canonical lower case
names work now.

llvm-svn: 363459
This commit is contained in:
Matt Arsenault 2019-06-14 21:16:06 +00:00
parent f4335b8e3c
commit bb0a610599
12 changed files with 20 additions and 20 deletions

View File

@@ -111,8 +111,8 @@ entry:
   %sgpr101 = tail call i32 asm sideeffect "s_mov_b32 s101, 0", "={s101}"() #0
   %sgpr102 = tail call i32 asm sideeffect "s_mov_b32 s102, 0", "={s102}"() #0
   %sgpr103 = tail call i32 asm sideeffect "s_mov_b32 s103, 0", "={s103}"() #0
-  %vcc_lo = tail call i32 asm sideeffect "s_mov_b32 $0, 0", "={VCC_LO}"() #0
+  %vcc_lo = tail call i32 asm sideeffect "s_mov_b32 $0, 0", "={vcc_lo}"() #0
-  %vcc_hi = tail call i32 asm sideeffect "s_mov_b32 $0, 0", "={VCC_HI}"() #0
+  %vcc_hi = tail call i32 asm sideeffect "s_mov_b32 $0, 0", "={vcc_hi}"() #0
   %cmp = icmp eq i32 %cnd, 0
   br i1 %cmp, label %bb3, label %bb2 ; +8 dword branch

View File

@@ -85,7 +85,7 @@ define void @void_func_void_clobber_s30_s31() #2 {
 ; GCN-NEXT: ;;#ASMEND
 ; GCN-NEXT: s_setpc_b64 s[30:31]
 define hidden void @void_func_void_clobber_vcc() #2 {
-  call void asm sideeffect "", "~{VCC}"() #0
+  call void asm sideeffect "", "~{vcc}"() #0
   ret void
 }

View File

@@ -65,7 +65,7 @@ entry:
   br i1 %cc, label %if, label %endif
 if:
-  call void asm "; clobber $0", "~{VCC}"() #0
+  call void asm "; clobber $0", "~{vcc}"() #0
   %u = add i32 %v, %v
   br label %endif

View File

@@ -218,11 +218,11 @@ define void @func_other_fi_user_non_inline_imm_offset_i32() #0 {
 define void @func_other_fi_user_non_inline_imm_offset_i32_vcc_live() #0 {
   %alloca0 = alloca [128 x i32], align 4, addrspace(5)
   %alloca1 = alloca [8 x i32], align 4, addrspace(5)
-  %vcc = call i64 asm sideeffect "; def $0", "={VCC}"()
+  %vcc = call i64 asm sideeffect "; def $0", "={vcc}"()
   %gep0 = getelementptr inbounds [128 x i32], [128 x i32] addrspace(5)* %alloca0, i32 0, i32 65
   %gep1 = getelementptr inbounds [8 x i32], [8 x i32] addrspace(5)* %alloca1, i32 0, i32 0
   store volatile i32 7, i32 addrspace(5)* %gep0
-  call void asm sideeffect "; use $0", "{VCC}"(i64 %vcc)
+  call void asm sideeffect "; use $0", "{vcc}"(i64 %vcc)
   %ptrtoint = ptrtoint i32 addrspace(5)* %gep1 to i32
   %mul = mul i32 %ptrtoint, 9
   store volatile i32 %mul, i32 addrspace(3)* undef

View File

@@ -36,7 +36,7 @@ entry:
 ; GCN: s_mov_b32 [[COPY_M0:s[0-9]+]], m0
 ; GCN: ; use [[COPY_M0]]
 define amdgpu_kernel void @inline_sreg_constraint_m0() {
-  %m0 = tail call i32 asm sideeffect "s_mov_b32 m0, -1", "={M0}"()
+  %m0 = tail call i32 asm sideeffect "s_mov_b32 m0, -1", "={m0}"()
   tail call void asm sideeffect "; use $0", "s"(i32 %m0)
   ret void
 }

View File

@@ -106,7 +106,7 @@ define amdgpu_ps half @interp_p1_m0_setup(float inreg %i, float inreg %j, i32 in
 ; GFX8-16BANK-NEXT: v_add_f16_e32 v0, s3, v0
 ; GFX8-16BANK-NEXT: ; return to shader part epilog
 main_body:
-  %mx = call i32 asm sideeffect "s_mov_b32 m0, 0", "={M0}"() #0
+  %mx = call i32 asm sideeffect "s_mov_b32 m0, 0", "={m0}"() #0
   %p1_0 = call float @llvm.amdgcn.interp.p1.f16(float %i, i32 1, i32 2, i1 0, i32 %m0)
   %p2_0 = call half @llvm.amdgcn.interp.p2.f16(float %p1_0, float %j, i32 1, i32 2, i1 0, i32 %m0)
   %my = trunc i32 %mx to i16
@@ -170,7 +170,7 @@ define amdgpu_ps half @interp_p2_m0_setup(float inreg %i, float inreg %j, i32 in
 ; GFX8-16BANK-NEXT: ; return to shader part epilog
 main_body:
   %p1_0 = call float @llvm.amdgcn.interp.p1.f16(float %i, i32 1, i32 2, i1 0, i32 %m0)
-  %mx = call i32 asm sideeffect "s_mov_b32 m0, 0", "={M0}"() #0
+  %mx = call i32 asm sideeffect "s_mov_b32 m0, 0", "={m0}"() #0
   %p2_0 = call half @llvm.amdgcn.interp.p2.f16(float %p1_0, float %j, i32 1, i32 2, i1 0, i32 %m0)
   %my = trunc i32 %mx to i16
   %mh = bitcast i16 %my to half

View File

@@ -26,7 +26,7 @@ define amdgpu_kernel void @test_readfirstlane_imm(i32 addrspace(1)* %out) #1 {
 ; CHECK: v_mov_b32_e32 [[VVAL:v[0-9]]], [[COPY_M0]]
 ; CHECK: v_readfirstlane_b32 s{{[0-9]+}}, [[VVAL]]
 define amdgpu_kernel void @test_readfirstlane_m0(i32 addrspace(1)* %out) #1 {
-  %m0 = call i32 asm "s_mov_b32 m0, -1", "={M0}"()
+  %m0 = call i32 asm "s_mov_b32 m0, -1", "={m0}"()
   %readfirstlane = call i32 @llvm.amdgcn.readfirstlane(i32 %m0)
   store i32 %readfirstlane, i32 addrspace(1)* %out, align 4
   ret void

View File

@@ -40,7 +40,7 @@ define amdgpu_kernel void @test_readlane_vregs(i32 addrspace(1)* %out, <2 x i32>
 ; CHECK: v_mov_b32_e32 [[VVAL:v[0-9]]], [[COPY_M0]]
 ; CHECK: v_readlane_b32 s{{[0-9]+}}, [[VVAL]], s{{[0-9]+}}
 define amdgpu_kernel void @test_readlane_m0_sreg(i32 addrspace(1)* %out, i32 %src1) #1 {
-  %m0 = call i32 asm "s_mov_b32 m0, -1", "={M0}"()
+  %m0 = call i32 asm "s_mov_b32 m0, -1", "={m0}"()
   %readlane = call i32 @llvm.amdgcn.readlane(i32 %m0, i32 %src1)
   store i32 %readlane, i32 addrspace(1)* %out, align 4
   ret void

View File

@@ -42,7 +42,7 @@ define amdgpu_kernel void @test_writelane_vreg_lane(i32 addrspace(1)* %out, <2 x
 ; CHECK: v_writelane_b32 v{{[0-9]+}}, [[COPY_M0]], s{{[0-9]+}}
 define amdgpu_kernel void @test_writelane_m0_sreg(i32 addrspace(1)* %out, i32 %src1) #1 {
   %oldval = load i32, i32 addrspace(1)* %out
-  %m0 = call i32 asm "s_mov_b32 m0, -1", "={M0}"()
+  %m0 = call i32 asm "s_mov_b32 m0, -1", "={m0}"()
   %writelane = call i32 @llvm.amdgcn.writelane(i32 %m0, i32 %src1, i32 %oldval)
   store i32 %writelane, i32 addrspace(1)* %out, align 4
   ret void

View File

@@ -43,7 +43,7 @@
 ; GCN: s_add_i32 s{{[0-9]+}}, m0, 1
 define amdgpu_kernel void @spill_m0(i32 %cond, i32 addrspace(1)* %out) #0 {
 entry:
-  %m0 = call i32 asm sideeffect "s_mov_b32 m0, 0", "={M0}"() #0
+  %m0 = call i32 asm sideeffect "s_mov_b32 m0, 0", "={m0}"() #0
   %cmp0 = icmp eq i32 %cond, 0
   br i1 %cmp0, label %if, label %endif
@@ -52,7 +52,7 @@ if:
   br label %endif
 endif:
-  %foo = call i32 asm sideeffect "s_add_i32 $0, $1, 1", "=s,{M0}"(i32 %m0) #0
+  %foo = call i32 asm sideeffect "s_add_i32 $0, $1, 1", "=s,{m0}"(i32 %m0) #0
   store i32 %foo, i32 addrspace(1)* %out
   ret void
 }
@@ -138,9 +138,9 @@ endif: ; preds = %else, %if
 ; GCN-NOT: s_buffer_load_dword m0
 define amdgpu_kernel void @m0_unavailable_spill(i32 %m0.arg) #0 {
 main_body:
-  %m0 = call i32 asm sideeffect "; def $0, 1", "={M0}"() #0
+  %m0 = call i32 asm sideeffect "; def $0, 1", "={m0}"() #0
   %tmp = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %m0.arg)
-  call void asm sideeffect "; clobber $0", "~{M0}"() #0
+  call void asm sideeffect "; clobber $0", "~{m0}"() #0
   %cmp = fcmp ueq float 0.000000e+00, %tmp
   br i1 %cmp, label %if, label %else
@@ -191,14 +191,14 @@ endif:
 ; TOSMEM: s_dcache_wb
 ; TOSMEM: s_endpgm
 define amdgpu_kernel void @restore_m0_lds(i32 %arg) {
-  %m0 = call i32 asm sideeffect "s_mov_b32 m0, 0", "={M0}"() #0
+  %m0 = call i32 asm sideeffect "s_mov_b32 m0, 0", "={m0}"() #0
   %sval = load volatile i64, i64 addrspace(4)* undef
   %cmp = icmp eq i32 %arg, 0
   br i1 %cmp, label %ret, label %bb
 bb:
   store volatile i64 %sval, i64 addrspace(3)* undef
-  call void asm sideeffect "; use $0", "{M0}"(i32 %m0) #0
+  call void asm sideeffect "; use $0", "{m0}"(i32 %m0) #0
   br label %ret
 ret:

View File

@@ -83,7 +83,7 @@ define amdgpu_kernel void @v_uaddo_i32_novcc(i32 addrspace(1)* %out, i1 addrspac
   %val = extractvalue { i32, i1 } %uadd, 0
   %carry = extractvalue { i32, i1 } %uadd, 1
   store volatile i32 %val, i32 addrspace(1)* %out, align 4
-  call void asm sideeffect "", "~{VCC}"() #0
+  call void asm sideeffect "", "~{vcc}"() #0
   store volatile i1 %carry, i1 addrspace(1)* %carryout
   ret void
 }

View File

@@ -84,7 +84,7 @@ define amdgpu_kernel void @v_usubo_i32_novcc(i32 addrspace(1)* %out, i1 addrspac
   %val = extractvalue { i32, i1 } %uadd, 0
   %carry = extractvalue { i32, i1 } %uadd, 1
   store volatile i32 %val, i32 addrspace(1)* %out, align 4
-  call void asm sideeffect "", "~{VCC}"() #0
+  call void asm sideeffect "", "~{vcc}"() #0
   store volatile i1 %carry, i1 addrspace(1)* %carryout
   ret void
 }