AMDGPU: Add more mad_64_32 test cases

Test the behavior when a MUL is used multiple times, as well as when it
is uniform.

Run the tests for gfx9 as well, which added S_MUL_HI_[IU]32.
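
The shape of the two new cases, distilled from the functions added in the diff below (a minimal sketch, not part of the patch; argument names here are illustrative):

  define i64 @mad_i64_i32_multiple(i32 %a, i32 %b, i64 %c, i64 %d) {
    %sa = sext i32 %a to i64
    %sb = sext i32 %b to i64
    %mul = mul i64 %sa, %sb        ; a single MUL ...
    %mad1 = add i64 %mul, %c       ; ... feeding two separate MADs
    %mad2 = add i64 %mul, %d
    %out = xor i64 %mad1, %mad2
    ret i64 %out
  }
  ; the uniform case does the same multiply-add with kernel scalar (SGPR)
  ; arguments; see @mad_i64_i32_uniform in the diff below.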
Nicolai Hähnle 2022-04-14 23:50:57 -05:00
parent 5232c5c5d4
commit f097088b05
1 changed file with 214 additions and 0 deletions

@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -check-prefixes=CI %s
; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefixes=SI %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9 %s
define i64 @mad_i64_i32_sextops(i32 %arg0, i32 %arg1, i64 %arg2) #0 {
; CI-LABEL: mad_i64_i32_sextops:
@@ -17,6 +18,12 @@ define i64 @mad_i64_i32_sextops(i32 %arg0, i32 %arg1, i64 %arg2) #0 {
; SI-NEXT: v_add_i32_e32 v0, vcc, v4, v2
; SI-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
; SI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: mad_i64_i32_sextops:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mad_i64_i32 v[0:1], s[4:5], v0, v1, v[2:3]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%sext0 = sext i32 %arg0 to i64
%sext1 = sext i32 %arg1 to i64
%mul = mul i64 %sext0, %sext1
@@ -39,6 +46,12 @@ define i64 @mad_i64_i32_sextops_commute(i32 %arg0, i32 %arg1, i64 %arg2) #0 {
; SI-NEXT: v_add_i32_e32 v0, vcc, v2, v4
; SI-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc
; SI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: mad_i64_i32_sextops_commute:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mad_i64_i32 v[0:1], s[4:5], v0, v1, v[2:3]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%sext0 = sext i32 %arg0 to i64
%sext1 = sext i32 %arg1 to i64
%mul = mul i64 %sext0, %sext1
@@ -61,6 +74,12 @@ define i64 @mad_u64_u32_zextops(i32 %arg0, i32 %arg1, i64 %arg2) #0 {
; SI-NEXT: v_add_i32_e32 v0, vcc, v4, v2
; SI-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
; SI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: mad_u64_u32_zextops:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v0, v1, v[2:3]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%sext0 = zext i32 %arg0 to i64
%sext1 = zext i32 %arg1 to i64
%mul = mul i64 %sext0, %sext1
@@ -83,6 +102,12 @@ define i64 @mad_u64_u32_zextops_commute(i32 %arg0, i32 %arg1, i64 %arg2) #0 {
; SI-NEXT: v_add_i32_e32 v0, vcc, v2, v4
; SI-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc
; SI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: mad_u64_u32_zextops_commute:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v0, v1, v[2:3]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%sext0 = zext i32 %arg0 to i64
%sext1 = zext i32 %arg1 to i64
%mul = mul i64 %sext0, %sext1
@@ -148,6 +173,31 @@ define i128 @mad_i64_i32_sextops_i32_i128(i32 %arg0, i32 %arg1, i128 %arg2) #0 {
; SI-NEXT: v_addc_u32_e32 v2, vcc, v7, v4, vcc
; SI-NEXT: v_addc_u32_e32 v3, vcc, v6, v5, vcc
; SI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: mad_i64_i32_sextops_i32_i128:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v0, v1, 0
; GFX9-NEXT: v_ashrrev_i32_e32 v13, 31, v0
; GFX9-NEXT: v_mov_b32_e32 v8, 0
; GFX9-NEXT: v_mad_u64_u32 v[9:10], s[4:5], v13, v1, v[7:8]
; GFX9-NEXT: v_ashrrev_i32_e32 v14, 31, v1
; GFX9-NEXT: v_mad_i64_i32 v[11:12], s[4:5], v1, v13, 0
; GFX9-NEXT: v_mov_b32_e32 v7, v10
; GFX9-NEXT: v_mov_b32_e32 v10, v8
; GFX9-NEXT: v_mad_u64_u32 v[8:9], s[4:5], v0, v14, v[9:10]
; GFX9-NEXT: v_mad_i64_i32 v[0:1], s[4:5], v14, v0, v[11:12]
; GFX9-NEXT: v_add_co_u32_e32 v9, vcc, v7, v9
; GFX9-NEXT: v_addc_co_u32_e64 v10, s[4:5], 0, 0, vcc
; GFX9-NEXT: v_mad_u64_u32 v[9:10], s[4:5], v13, v14, v[9:10]
; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, v9, v0
; GFX9-NEXT: v_addc_co_u32_e32 v9, vcc, v10, v1, vcc
; GFX9-NEXT: v_mov_b32_e32 v1, v8
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v6, v2
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
; GFX9-NEXT: v_addc_co_u32_e32 v2, vcc, v7, v4, vcc
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v9, v5, vcc
; GFX9-NEXT: s_setpc_b64 s[30:31]
%sext0 = sext i32 %arg0 to i128
%sext1 = sext i32 %arg1 to i128
%mul = mul i128 %sext0, %sext1
@@ -172,6 +222,14 @@ define i63 @mad_i64_i32_sextops_i32_i63(i32 %arg0, i32 %arg1, i63 %arg2) #0 {
; SI-NEXT: v_add_i32_e32 v0, vcc, v4, v2
; SI-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
; SI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: mad_i64_i32_sextops_i32_i63:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_lshlrev_b64 v[2:3], 1, v[2:3]
; GFX9-NEXT: v_ashrrev_i64 v[2:3], 1, v[2:3]
; GFX9-NEXT: v_mad_i64_i32 v[0:1], s[4:5], v0, v1, v[2:3]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%sext0 = sext i32 %arg0 to i63
%sext1 = sext i32 %arg1 to i63
%mul = mul i63 %sext0, %sext1
@@ -202,6 +260,16 @@ define i63 @mad_i64_i32_sextops_i31_i63(i31 %arg0, i31 %arg1, i63 %arg2) #0 {
; SI-NEXT: v_add_i32_e32 v0, vcc, v1, v2
; SI-NEXT: v_addc_u32_e32 v1, vcc, v4, v3, vcc
; SI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: mad_i64_i32_sextops_i31_i63:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_lshlrev_b64 v[2:3], 1, v[2:3]
; GFX9-NEXT: v_bfe_i32 v1, v1, 0, 31
; GFX9-NEXT: v_ashrrev_i64 v[2:3], 1, v[2:3]
; GFX9-NEXT: v_bfe_i32 v0, v0, 0, 31
; GFX9-NEXT: v_mad_i64_i32 v[0:1], s[4:5], v0, v1, v[2:3]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%sext0 = sext i31 %arg0 to i63
%sext1 = sext i31 %arg1 to i63
%mul = mul i63 %sext0, %sext1
@@ -232,6 +300,17 @@ define i64 @mad_i64_i32_extops_i32_i64(i32 %arg0, i32 %arg1, i64 %arg2) #0 {
; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v2
; SI-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
; SI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: mad_i64_i32_extops_i32_i64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_ashrrev_i32_e32 v4, 31, v0
; GFX9-NEXT: v_mul_lo_u32 v4, v4, v1
; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v0, v1, 0
; GFX9-NEXT: v_add_u32_e32 v1, v1, v4
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
; GFX9-NEXT: s_setpc_b64 s[30:31]
%ext0 = sext i32 %arg0 to i64
%ext1 = zext i32 %arg1 to i64
%mul = mul i64 %ext0, %ext1
@@ -254,6 +333,12 @@ define i64 @mad_u64_u32_bitops(i64 %arg0, i64 %arg1, i64 %arg2) #0 {
; SI-NEXT: v_add_i32_e32 v0, vcc, v1, v4
; SI-NEXT: v_addc_u32_e32 v1, vcc, v2, v5, vcc
; SI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: mad_u64_u32_bitops:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v0, v2, v[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%trunc.lhs = and i64 %arg0, 4294967295
%trunc.rhs = and i64 %arg1, 4294967295
%mul = mul i64 %trunc.lhs, %trunc.rhs
@@ -284,6 +369,17 @@ define i64 @mad_u64_u32_bitops_lhs_mask_small(i64 %arg0, i64 %arg1, i64 %arg2) #
; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v4
; SI-NEXT: v_addc_u32_e32 v1, vcc, v1, v5, vcc
; SI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: mad_u64_u32_bitops_lhs_mask_small:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_and_b32_e32 v1, 1, v1
; GFX9-NEXT: v_mul_lo_u32 v3, v1, v2
; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v0, v2, 0
; GFX9-NEXT: v_add_u32_e32 v1, v1, v3
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v4
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v5, vcc
; GFX9-NEXT: s_setpc_b64 s[30:31]
%trunc.lhs = and i64 %arg0, 8589934591
%trunc.rhs = and i64 %arg1, 4294967295
%mul = mul i64 %trunc.lhs, %trunc.rhs
@@ -314,6 +410,17 @@ define i64 @mad_u64_u32_bitops_rhs_mask_small(i64 %arg0, i64 %arg1, i64 %arg2) #
; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v4
; SI-NEXT: v_addc_u32_e32 v1, vcc, v1, v5, vcc
; SI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: mad_u64_u32_bitops_rhs_mask_small:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_and_b32_e32 v1, 1, v3
; GFX9-NEXT: v_mul_lo_u32 v3, v0, v1
; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v0, v2, 0
; GFX9-NEXT: v_add_u32_e32 v1, v1, v3
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v4
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v5, vcc
; GFX9-NEXT: s_setpc_b64 s[30:31]
%trunc.lhs = and i64 %arg0, 4294967295
%trunc.rhs = and i64 %arg1, 8589934591
%mul = mul i64 %trunc.lhs, %trunc.rhs
@@ -336,6 +443,12 @@ define i64 @mad_i64_i32_bitops(i64 %arg0, i64 %arg1, i64 %arg2) #0 {
; SI-NEXT: v_add_i32_e32 v0, vcc, v1, v4
; SI-NEXT: v_addc_u32_e32 v1, vcc, v2, v5, vcc
; SI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: mad_i64_i32_bitops:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mad_i64_i32 v[0:1], s[4:5], v0, v2, v[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%shl.lhs = shl i64 %arg0, 32
%trunc.lhs = ashr i64 %shl.lhs, 32
%shl.rhs = shl i64 %arg1, 32
@@ -361,6 +474,12 @@ define i64 @mad_i64_i32_unpack_i64ops(i64 %arg0) #0 {
; SI-NEXT: v_add_i32_e32 v0, vcc, v2, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc
; SI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: mad_i64_i32_unpack_i64ops:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v1, v0, v[0:1]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%tmp4 = lshr i64 %arg0, 32
%tmp5 = and i64 %arg0, 4294967295
%mul = mul nuw i64 %tmp4, %tmp5
@@ -368,5 +487,100 @@ define i64 @mad_i64_i32_unpack_i64ops(i64 %arg0) #0 {
ret i64 %mad
}
define amdgpu_kernel void @mad_i64_i32_uniform(i64 addrspace(1)* %out, i32 %arg0, i32 %arg1, i64 %arg2) #0 {
; CI-LABEL: mad_i64_i32_uniform:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xb
; CI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
; CI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v2, s3
; CI-NEXT: v_mov_b32_e32 v0, s4
; CI-NEXT: v_mov_b32_e32 v1, s5
; CI-NEXT: v_mad_u64_u32 v[0:1], s[2:3], s2, v2, v[0:1]
; CI-NEXT: s_mov_b32 s3, 0xf000
; CI-NEXT: s_mov_b32 s2, -1
; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; CI-NEXT: s_endpgm
;
; SI-LABEL: mad_i64_i32_uniform:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xb
; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s3
; SI-NEXT: v_mul_hi_u32 v1, s2, v0
; SI-NEXT: s_mul_i32 s2, s2, s3
; SI-NEXT: v_mov_b32_e32 v0, s2
; SI-NEXT: v_mov_b32_e32 v2, s1
; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, v1, v2, vcc
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; GFX9-LABEL: mad_i64_i32_uniform:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, s3
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[0:1], s2, v2, v[0:1]
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
; GFX9-NEXT: s_endpgm
%ext0 = zext i32 %arg0 to i64
%ext1 = zext i32 %arg1 to i64
%mul = mul i64 %ext0, %ext1
%mad = add i64 %mul, %arg2
store i64 %mad, i64 addrspace(1)* %out
ret void
}
define i64 @mad_i64_i32_multiple(i32 %arg0, i32 %arg1, i64 %arg2, i64 %arg3) #0 {
; CI-LABEL: mad_i64_i32_multiple:
; CI: ; %bb.0:
; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CI-NEXT: v_mad_i64_i32 v[2:3], s[4:5], v0, v1, v[2:3]
; CI-NEXT: v_mad_i64_i32 v[0:1], s[4:5], v0, v1, v[4:5]
; CI-NEXT: v_xor_b32_e32 v1, v3, v1
; CI-NEXT: v_xor_b32_e32 v0, v2, v0
; CI-NEXT: s_setpc_b64 s[30:31]
;
; SI-LABEL: mad_i64_i32_multiple:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mul_lo_u32 v6, v0, v1
; SI-NEXT: v_mul_hi_i32 v0, v0, v1
; SI-NEXT: v_add_i32_e32 v2, vcc, v6, v2
; SI-NEXT: v_addc_u32_e32 v1, vcc, v0, v3, vcc
; SI-NEXT: v_add_i32_e32 v3, vcc, v6, v4
; SI-NEXT: v_addc_u32_e32 v0, vcc, v0, v5, vcc
; SI-NEXT: v_xor_b32_e32 v1, v1, v0
; SI-NEXT: v_xor_b32_e32 v0, v2, v3
; SI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: mad_i64_i32_multiple:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mad_i64_i32 v[2:3], s[4:5], v0, v1, v[2:3]
; GFX9-NEXT: v_mad_i64_i32 v[0:1], s[4:5], v0, v1, v[4:5]
; GFX9-NEXT: v_xor_b32_e32 v1, v3, v1
; GFX9-NEXT: v_xor_b32_e32 v0, v2, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
%sext0 = sext i32 %arg0 to i64
%sext1 = sext i32 %arg1 to i64
%mul = mul i64 %sext0, %sext1
%mad1 = add i64 %mul, %arg2
%mad2 = add i64 %mul, %arg3
%out = xor i64 %mad1, %mad2
ret i64 %out
}
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone speculatable }