Let llvm.invariant.group.barrier accept pointers to any address space

llvm.invariant.group.barrier may accept pointers to arbitrary address space.

This patch lets it accept pointers to i8 in any address space and return a
pointer to i8 in the same address space.

Differential Revision: https://reviews.llvm.org/D39973

llvm-svn: 318413
This commit is contained in:
Yaxun Liu 2017-11-16 16:32:16 +00:00
parent e9eb7f0cb8
commit 407ca36b27
8 changed files with 65 additions and 59 deletions

View File

@ -12817,10 +12817,13 @@ This intrinsic indicates that the memory is mutable again.
Syntax:
"""""""
This is an overloaded intrinsic. The memory object can belong to any address
space. The returned pointer must belong to the same address space as the
argument.

::

      declare i8* @llvm.invariant.group.barrier.p0i8(i8* <ptr>)

Overview:
"""""""""

View File

@ -1810,26 +1810,28 @@ public:
/// \brief Create an invariant.group.barrier intrinsic call, that stops /// \brief Create an invariant.group.barrier intrinsic call, that stops
/// optimizer to propagate equality using invariant.group metadata. /// optimizer to propagate equality using invariant.group metadata.
/// If Ptr type is different from i8*, it's casted to i8* before call /// If Ptr type is different from pointer to i8, it's casted to pointer to i8
/// and casted back to Ptr type after call. /// in the same address space before call and casted back to Ptr type after
/// call.
/// \brief Create an invariant.group.barrier intrinsic call that stops the
/// optimizer from propagating equality using invariant.group metadata.
///
/// \param Ptr the pointer to pass through the barrier; may live in any
///        address space. If its type is not i8* (in its address space), it is
///        bitcast to i8* before the call and the result is bitcast back to
///        the original type.
/// \returns a Value of the same pointer type as \p Ptr.
Value *CreateInvariantGroupBarrier(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "invariant.group.barrier only applies to pointers.");
  auto *PtrType = Ptr->getType();
  // The intrinsic is overloaded on the pointer type, so build i8* in the
  // same address space as the argument and mangle the declaration with it.
  auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
  if (PtrType != Int8PtrTy)
    Ptr = CreateBitCast(Ptr, Int8PtrTy);
  Module *M = BB->getParent()->getParent();
  Function *FnInvariantGroupBarrier = Intrinsic::getDeclaration(
      M, Intrinsic::invariant_group_barrier, {Int8PtrTy});

  assert(FnInvariantGroupBarrier->getReturnType() == Int8PtrTy &&
         FnInvariantGroupBarrier->getFunctionType()->getParamType(0) ==
             Int8PtrTy &&
         "InvariantGroupBarrier should take and return the same type");

  CallInst *Fn = CreateCall(FnInvariantGroupBarrier, {Ptr});

  // Restore the caller's original pointer type if we had to cast above.
  if (PtrType != Int8PtrTy)
    return CreateBitCast(Fn, PtrType);
  return Fn;
}

View File

@ -711,8 +711,8 @@ def int_invariant_end : Intrinsic<[],
// which is valid. // which is valid.
// The argument also can't be marked with 'returned' attribute, because // The argument also can't be marked with 'returned' attribute, because
// it would remove barrier. // it would remove barrier.
// Overloaded on the pointer's address space: the return type is constrained
// to match the (any-address-space) pointer argument via LLVMMatchType<0>.
def int_invariant_group_barrier : Intrinsic<[llvm_anyptr_ty],
                                            [LLVMMatchType<0>],
                                            [IntrReadMem, IntrArgMemOnly]>;
//===------------------------ Stackmap Intrinsics -------------------------===// //===------------------------ Stackmap Intrinsics -------------------------===//

View File

@ -17,8 +17,8 @@ define i32 @foo(i32* %a) {
%1 = bitcast i32* %a to i8* %1 = bitcast i32* %a to i8*
; CHECK: MemoryUse(2) ; CHECK: MemoryUse(2)
; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier(i8* %1) ; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
%a8 = call i8* @llvm.invariant.group.barrier(i8* %1) %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
%a32 = bitcast i8* %a8 to i32* %a32 = bitcast i8* %a8 to i32*
; This have to be MemoryUse(2), because we can't skip the barrier based on ; This have to be MemoryUse(2), because we can't skip the barrier based on
@ -36,8 +36,8 @@ define i32 @skipBarrier(i32* %a) {
%1 = bitcast i32* %a to i8* %1 = bitcast i32* %a to i8*
; CHECK: MemoryUse(1) ; CHECK: MemoryUse(1)
; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier(i8* %1) ; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
%a8 = call i8* @llvm.invariant.group.barrier(i8* %1) %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
%a32 = bitcast i8* %a8 to i32* %a32 = bitcast i8* %a8 to i32*
; We can skip the barrier only if the "skip" is not based on !invariant.group. ; We can skip the barrier only if the "skip" is not based on !invariant.group.
@ -55,8 +55,8 @@ define i32 @skipBarrier2(i32* %a) {
%1 = bitcast i32* %a to i8* %1 = bitcast i32* %a to i8*
; CHECK: MemoryUse(liveOnEntry) ; CHECK: MemoryUse(liveOnEntry)
; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier(i8* %1) ; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
%a8 = call i8* @llvm.invariant.group.barrier(i8* %1) %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
%a32 = bitcast i8* %a8 to i32* %a32 = bitcast i8* %a8 to i32*
; We can skip the barrier only if the "skip" is not based on !invariant.group. ; We can skip the barrier only if the "skip" is not based on !invariant.group.
@ -86,8 +86,8 @@ define i32 @handleInvariantGroups(i32* %a) {
store i32 1, i32* @g, align 4 store i32 1, i32* @g, align 4
%1 = bitcast i32* %a to i8* %1 = bitcast i32* %a to i8*
; CHECK: MemoryUse(2) ; CHECK: MemoryUse(2)
; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier(i8* %1) ; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
%a8 = call i8* @llvm.invariant.group.barrier(i8* %1) %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
%a32 = bitcast i8* %a8 to i32* %a32 = bitcast i8* %a8 to i32*
; CHECK: MemoryUse(2) ; CHECK: MemoryUse(2)
@ -145,8 +145,8 @@ entry:
call void @clobber8(i8* %p) call void @clobber8(i8* %p)
; CHECK: MemoryUse(2) ; CHECK: MemoryUse(2)
; CHECK-NEXT: %after = call i8* @llvm.invariant.group.barrier(i8* %p) ; CHECK-NEXT: %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
%after = call i8* @llvm.invariant.group.barrier(i8* %p) %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
br i1 undef, label %Loop.Body, label %Loop.End br i1 undef, label %Loop.Body, label %Loop.End
Loop.Body: Loop.Body:
@ -192,8 +192,8 @@ entry:
call void @clobber8(i8* %p) call void @clobber8(i8* %p)
; CHECK: MemoryUse(2) ; CHECK: MemoryUse(2)
; CHECK-NEXT: %after = call i8* @llvm.invariant.group.barrier(i8* %p) ; CHECK-NEXT: %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
%after = call i8* @llvm.invariant.group.barrier(i8* %p) %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
br i1 undef, label %Loop.Body, label %Loop.End br i1 undef, label %Loop.Body, label %Loop.End
Loop.Body: Loop.Body:
@ -253,8 +253,8 @@ entry:
; CHECK-NEXT: call void @clobber ; CHECK-NEXT: call void @clobber
call void @clobber8(i8* %p) call void @clobber8(i8* %p)
; CHECK: MemoryUse(2) ; CHECK: MemoryUse(2)
; CHECK-NEXT: %after = call i8* @llvm.invariant.group.barrier(i8* %p) ; CHECK-NEXT: %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
%after = call i8* @llvm.invariant.group.barrier(i8* %p) %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
br i1 undef, label %Loop.Pre, label %Loop.End br i1 undef, label %Loop.Pre, label %Loop.End
Loop.Pre: Loop.Pre:
@ -293,7 +293,7 @@ Ret:
ret i8 %3 ret i8 %3
} }
declare i8* @llvm.invariant.group.barrier(i8*) declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
declare void @clobber(i32*) declare void @clobber(i32*)
declare void @clobber8(i8*) declare void @clobber8(i8*)

View File

@ -1,22 +1,23 @@
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mtriple=amdgcn---amdgiz -mattr=+promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

target datalayout = "A5"

; Private (alloca) pointers live in address space 5 under the amdgiz ABI, so
; the invariant intrinsics are mangled with p5i8.
declare {}* @llvm.invariant.start.p5i8(i64, i8 addrspace(5)* nocapture) #0
declare void @llvm.invariant.end.p5i8({}*, i64, i8 addrspace(5)* nocapture) #0
declare i8 addrspace(5)* @llvm.invariant.group.barrier.p5i8(i8 addrspace(5)*) #1

; GCN-LABEL: {{^}}use_invariant_promotable_lds:
; GCN: buffer_load_dword
; GCN: ds_write_b32
define amdgpu_kernel void @use_invariant_promotable_lds(i32 addrspace(1)* %arg) #2 {
bb:
  %tmp = alloca i32, align 4, addrspace(5)
  %tmp1 = bitcast i32 addrspace(5)* %tmp to i8 addrspace(5)*
  %tmp2 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 1
  %tmp3 = load i32, i32 addrspace(1)* %tmp2
  store i32 %tmp3, i32 addrspace(5)* %tmp
  %tmp4 = call {}* @llvm.invariant.start.p5i8(i64 4, i8 addrspace(5)* %tmp1) #0
  call void @llvm.invariant.end.p5i8({}* %tmp4, i64 4, i8 addrspace(5)* %tmp1) #0
  %tmp5 = call i8 addrspace(5)* @llvm.invariant.group.barrier.p5i8(i8 addrspace(5)* %tmp1) #1
  ret void
}

View File

@ -12,10 +12,10 @@ define i8 @optimizable() {
entry: entry:
%ptr = alloca i8 %ptr = alloca i8
store i8 42, i8* %ptr, !invariant.group !0 store i8 42, i8* %ptr, !invariant.group !0
; CHECK: call i8* @llvm.invariant.group.barrier ; CHECK: call i8* @llvm.invariant.group.barrier.p0i8
%ptr2 = call i8* @llvm.invariant.group.barrier(i8* %ptr) %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
; CHECK-NOT: call i8* @llvm.invariant.group.barrier ; CHECK-NOT: call i8* @llvm.invariant.group.barrier.p0i8
%ptr3 = call i8* @llvm.invariant.group.barrier(i8* %ptr) %ptr3 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
; CHECK: call void @clobber(i8* {{.*}}%ptr) ; CHECK: call void @clobber(i8* {{.*}}%ptr)
call void @clobber(i8* %ptr) call void @clobber(i8* %ptr)
@ -34,11 +34,11 @@ define i8 @unoptimizable() {
entry: entry:
%ptr = alloca i8 %ptr = alloca i8
store i8 42, i8* %ptr, !invariant.group !0 store i8 42, i8* %ptr, !invariant.group !0
; CHECK: call i8* @llvm.invariant.group.barrier ; CHECK: call i8* @llvm.invariant.group.barrier.p0i8
%ptr2 = call i8* @llvm.invariant.group.barrier(i8* %ptr) %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
call void @clobber(i8* %ptr) call void @clobber(i8* %ptr)
; CHECK: call i8* @llvm.invariant.group.barrier ; CHECK: call i8* @llvm.invariant.group.barrier.p0i8
%ptr3 = call i8* @llvm.invariant.group.barrier(i8* %ptr) %ptr3 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
; CHECK: call void @clobber(i8* {{.*}}%ptr) ; CHECK: call void @clobber(i8* {{.*}}%ptr)
call void @clobber(i8* %ptr) call void @clobber(i8* %ptr)
; CHECK: call void @use(i8* {{.*}}%ptr2) ; CHECK: call void @use(i8* {{.*}}%ptr2)
@ -55,8 +55,8 @@ declare void @use(i8* readonly)
declare void @clobber(i8*) declare void @clobber(i8*)
; CHECK: Function Attrs: argmemonly nounwind readonly ; CHECK: Function Attrs: argmemonly nounwind readonly
; CHECK-NEXT: declare i8* @llvm.invariant.group.barrier(i8*) ; CHECK-NEXT: declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
declare i8* @llvm.invariant.group.barrier(i8*) declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
!0 = !{} !0 = !{}

View File

@ -6,10 +6,10 @@
define void @foo() { define void @foo() {
enter: enter:
; CHECK-NOT: !invariant.group ; CHECK-NOT: !invariant.group
; CHECK-NOT: @llvm.invariant.group.barrier( ; CHECK-NOT: @llvm.invariant.group.barrier.p0i8(
; CHECK: %val = load i8, i8* @tmp, !tbaa ; CHECK: %val = load i8, i8* @tmp, !tbaa
%val = load i8, i8* @tmp, !invariant.group !0, !tbaa !{!1, !1, i64 0} %val = load i8, i8* @tmp, !invariant.group !0, !tbaa !{!1, !1, i64 0}
%ptr = call i8* @llvm.invariant.group.barrier(i8* @tmp) %ptr = call i8* @llvm.invariant.group.barrier.p0i8(i8* @tmp)
; CHECK: store i8 42, i8* @tmp ; CHECK: store i8 42, i8* @tmp
store i8 42, i8* %ptr, !invariant.group !0 store i8 42, i8* %ptr, !invariant.group !0
@ -18,7 +18,7 @@ enter:
} }
; CHECK-LABEL: } ; CHECK-LABEL: }
declare i8* @llvm.invariant.group.barrier(i8*) declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
!0 = !{!"something"} !0 = !{!"something"}
!1 = !{!"x", !0} !1 = !{!"x", !0}

View File

@ -25,7 +25,7 @@ define i8 @optimizable1() {
entry: entry:
%ptr = alloca i8 %ptr = alloca i8
store i8 42, i8* %ptr, !invariant.group !0 store i8 42, i8* %ptr, !invariant.group !0
%ptr2 = call i8* @llvm.invariant.group.barrier(i8* %ptr) %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
%a = load i8, i8* %ptr, !invariant.group !0 %a = load i8, i8* %ptr, !invariant.group !0
call void @foo(i8* %ptr2); call to use %ptr2 call void @foo(i8* %ptr2); call to use %ptr2
@ -242,7 +242,7 @@ define i8 @optimizable4() {
entry: entry:
%ptr = alloca i8 %ptr = alloca i8
store i8 42, i8* %ptr, !invariant.group !0 store i8 42, i8* %ptr, !invariant.group !0
%ptr2 = call i8* @llvm.invariant.group.barrier(i8* %ptr) %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
; CHECK-NOT: load ; CHECK-NOT: load
%a = load i8, i8* %ptr2, !invariant.group !0 %a = load i8, i8* %ptr2, !invariant.group !0
@ -314,7 +314,7 @@ entry:
; CHECK: store i8 %unknownValue, i8* %ptr, !invariant.group !0 ; CHECK: store i8 %unknownValue, i8* %ptr, !invariant.group !0
store i8 %unknownValue, i8* %ptr, !invariant.group !0 store i8 %unknownValue, i8* %ptr, !invariant.group !0
%newPtr2 = call i8* @llvm.invariant.group.barrier(i8* %ptr) %newPtr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
; CHECK-NOT: load ; CHECK-NOT: load
%d = load i8, i8* %newPtr2, !invariant.group !0 %d = load i8, i8* %newPtr2, !invariant.group !0
; CHECK: ret i8 %unknownValue ; CHECK: ret i8 %unknownValue
@ -441,7 +441,7 @@ declare void @_ZN1A3fooEv(%struct.A*)
declare void @_ZN1AC1Ev(%struct.A*) declare void @_ZN1AC1Ev(%struct.A*)
declare void @fooBit(i1*, i1) declare void @fooBit(i1*, i1)
declare i8* @llvm.invariant.group.barrier(i8*) declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
; Function Attrs: nounwind ; Function Attrs: nounwind
declare void @llvm.assume(i1 %cmp.vtables) #0 declare void @llvm.assume(i1 %cmp.vtables) #0