diff --git a/llvm/lib/Target/R600/AMDGPUInstructions.td b/llvm/lib/Target/R600/AMDGPUInstructions.td
index ef656b93ce04..972ef1dac979 100644
--- a/llvm/lib/Target/R600/AMDGPUInstructions.td
+++ b/llvm/lib/Target/R600/AMDGPUInstructions.td
@@ -387,6 +387,7 @@ class global_binary_atomic_op<SDNode atomic_op> : PatFrag<
 >;
 
 def atomic_add_global : global_binary_atomic_op<atomic_add>;
+def atomic_and_global : global_binary_atomic_op<atomic_and>;
 def atomic_sub_global : global_binary_atomic_op<atomic_sub>;
 
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/R600/SIInstructions.td b/llvm/lib/Target/R600/SIInstructions.td
index 5ef57b8ee61e..bc1f8392763d 100644
--- a/llvm/lib/Target/R600/SIInstructions.td
+++ b/llvm/lib/Target/R600/SIInstructions.td
@@ -908,7 +908,9 @@ defm BUFFER_ATOMIC_SUB : MUBUF_Atomic <
 //def BUFFER_ATOMIC_UMIN : MUBUF_ <0x00000036, "BUFFER_ATOMIC_UMIN", []>;
 //def BUFFER_ATOMIC_SMAX : MUBUF_ <0x00000037, "BUFFER_ATOMIC_SMAX", []>;
 //def BUFFER_ATOMIC_UMAX : MUBUF_ <0x00000038, "BUFFER_ATOMIC_UMAX", []>;
-//def BUFFER_ATOMIC_AND : MUBUF_ <0x00000039, "BUFFER_ATOMIC_AND", []>;
+defm BUFFER_ATOMIC_AND : MUBUF_Atomic <
+  0x00000039, "BUFFER_ATOMIC_AND", VReg_32, i32, atomic_and_global
+>;
 //def BUFFER_ATOMIC_OR : MUBUF_ <0x0000003a, "BUFFER_ATOMIC_OR", []>;
 //def BUFFER_ATOMIC_XOR : MUBUF_ <0x0000003b, "BUFFER_ATOMIC_XOR", []>;
 //def BUFFER_ATOMIC_INC : MUBUF_ <0x0000003c, "BUFFER_ATOMIC_INC", []>;
diff --git a/llvm/test/CodeGen/R600/global_atomics.ll b/llvm/test/CodeGen/R600/global_atomics.ll
index 9eb06b9200d6..5feecfc5812f 100644
--- a/llvm/test/CodeGen/R600/global_atomics.ll
+++ b/llvm/test/CodeGen/R600/global_atomics.ll
@@ -80,6 +80,86 @@ entry:
   ret void
 }
 
+; FUNC-LABEL: {{^}}atomic_and_i32_offset:
+; SI: BUFFER_ATOMIC_AND v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10{{$}}
+define void @atomic_and_i32_offset(i32 addrspace(1)* %out, i32 %in) {
+entry:
+  %gep = getelementptr i32 addrspace(1)* %out, i32 4
+  %0 = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_and_i32_ret_offset:
+; SI: BUFFER_ATOMIC_AND [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10 glc {{$}}
+; SI: BUFFER_STORE_DWORD [[RET]]
+define void @atomic_and_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+entry:
+  %gep = getelementptr i32 addrspace(1)* %out, i32 4
+  %0 = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
+  store i32 %0, i32 addrspace(1)* %out2
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_and_i32_addr64_offset:
+; SI: BUFFER_ATOMIC_AND v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10{{$}}
+define void @atomic_and_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+entry:
+  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+  %0 = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_and_i32_ret_addr64_offset:
+; SI: BUFFER_ATOMIC_AND [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10 glc{{$}}
+; SI: BUFFER_STORE_DWORD [[RET]]
+define void @atomic_and_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+entry:
+  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+  %0 = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
+  store i32 %0, i32 addrspace(1)* %out2
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_and_i32:
+; SI: BUFFER_ATOMIC_AND v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
+define void @atomic_and_i32(i32 addrspace(1)* %out, i32 %in) {
+entry:
+  %0 = atomicrmw volatile and i32 addrspace(1)* %out, i32 %in seq_cst
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_and_i32_ret:
+; SI: BUFFER_ATOMIC_AND [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
+; SI: BUFFER_STORE_DWORD [[RET]]
+define void @atomic_and_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+entry:
+  %0 = atomicrmw volatile and i32 addrspace(1)* %out, i32 %in seq_cst
+  store i32 %0, i32 addrspace(1)* %out2
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_and_i32_addr64:
+; SI: BUFFER_ATOMIC_AND v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
+define void @atomic_and_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+entry:
+  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+  %0 = atomicrmw volatile and i32 addrspace(1)* %ptr, i32 %in seq_cst
+  ret void
+}
+
+; FUNC-LABEL: {{^}}atomic_and_i32_ret_addr64:
+; SI: BUFFER_ATOMIC_AND [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
+; SI: BUFFER_STORE_DWORD [[RET]]
+define void @atomic_and_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+entry:
+  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+  %0 = atomicrmw volatile and i32 addrspace(1)* %ptr, i32 %in seq_cst
+  store i32 %0, i32 addrspace(1)* %out2
+  ret void
+}
+
 ; FUNC-LABEL: {{^}}atomic_sub_i32_offset:
 ; SI: BUFFER_ATOMIC_SUB v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10{{$}}
 define void @atomic_sub_i32_offset(i32 addrspace(1)* %out, i32 %in) {