AMDGPU: Add feature for unaligned access

llvm-svn: 274398
Matt Arsenault 2016-07-01 23:03:44 +00:00
parent 8af47a09e5
commit 7f681ac7a9
7 changed files with 280 additions and 165 deletions


@@ -61,6 +61,12 @@ def FeatureFlatAddressSpace : SubtargetFeature<"flat-address-space",
   "Support flat address space"
 >;
 
+def FeatureUnalignedBufferAccess : SubtargetFeature<"unaligned-buffer-access",
+  "UnalignedBufferAccess",
+  "true",
+  "Support unaligned global loads and stores"
+>;
+
 def FeatureXNACK : SubtargetFeature<"xnack",
   "EnableXNACK",
   "true",

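As with any subtarget feature, the new flag is exposed through -mattr. The test below is only a minimal sketch of how it can be toggled, modeled on the RUN lines updated later in this commit; the kernel and prefix names are illustrative, not part of the change.

; RUN: llc -march=amdgcn -mcpu=bonaire -mattr=+unaligned-buffer-access < %s | FileCheck -check-prefix=UNALIGNED %s
; RUN: llc -march=amdgcn -mcpu=bonaire -mattr=-unaligned-buffer-access < %s | FileCheck -check-prefix=ALIGNED %s

; With the feature enabled the align 1 load stays a single dword access;
; with it disabled it is expanded into byte loads.
; UNALIGNED: buffer_load_dword
; ALIGNED: buffer_load_ubyte
define void @mattr_example(i32 addrspace(1)* %p, i32 addrspace(1)* %r) {
  %v = load i32, i32 addrspace(1)* %p, align 1
  store i32 %v, i32 addrspace(1)* %r, align 4
  ret void
}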

@@ -47,7 +47,7 @@ AMDGPUSubtarget::initializeSubtargetDependencies(const Triple &TT,
   SmallString<256> FullFS("+promote-alloca,+fp64-denormals,+load-store-opt,");
   if (isAmdHsaOS()) // Turn on FlatForGlobal for HSA.
-    FullFS += "+flat-for-global,";
+    FullFS += "+flat-for-global,+unaligned-buffer-access,";
   FullFS += FS;
   ParseSubtargetFeatures(GPU, FullFS);

@@ -85,6 +85,8 @@ AMDGPUSubtarget::AMDGPUSubtarget(const Triple &TT, StringRef GPU, StringRef FS,
     FP64Denormals(false),
     FPExceptions(false),
     FlatForGlobal(false),
+    UnalignedBufferAccess(false),
     EnableXNACK(false),
     DebuggerInsertNops(false),
     DebuggerReserveRegs(false),

@@ -114,7 +116,6 @@ AMDGPUSubtarget::AMDGPUSubtarget(const Triple &TT, StringRef GPU, StringRef FS,
     TexVTXClauseSize(0),
     FeatureDisable(false),
     InstrItins(getInstrItineraryForCPU(GPU)) {
   initializeSubtargetDependencies(TT, GPU, FS);
 }

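Because the HSA path now appends +unaligned-buffer-access to the default feature string, amdhsa targets get the feature with no -mattr flag, and tests that rely on the old byte-by-byte lowering have to opt out explicitly (which is why the private-memory.ll RUN lines below add -mattr=-unaligned-buffer-access). A minimal sketch of the two invocations; the check prefixes, kernel name, and the exact expected mnemonics are assumptions for illustration, not checks taken from the commit.

; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=kaveri < %s | FileCheck -check-prefix=DEFAULT %s
; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=kaveri -mattr=-unaligned-buffer-access < %s | FileCheck -check-prefix=SPLIT %s

; With the new HSA default the align 1 load is expected to stay whole;
; opting out restores the old expansion into byte accesses
; (assuming flat-for-global lowering on HSA).
; DEFAULT: flat_load_dword
; SPLIT: flat_load_ubyte
define void @hsa_unaligned_default(i32 addrspace(1)* %p, i32 addrspace(1)* %r) {
  %v = load i32, i32 addrspace(1)* %p, align 1
  store i32 %v, i32 addrspace(1)* %r, align 4
  ret void
}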

@@ -74,6 +74,7 @@ protected:
   bool FP64Denormals;
   bool FPExceptions;
   bool FlatForGlobal;
+  bool UnalignedBufferAccess;
   bool EnableXNACK;
   bool DebuggerInsertNops;
   bool DebuggerReserveRegs;

@@ -254,6 +255,10 @@ public:
     return FlatForGlobal;
   }
 
+  bool hasUnalignedBufferAccess() const {
+    return UnalignedBufferAccess;
+  }
+
   bool isXNACKEnabled() const {
     return EnableXNACK;
   }


@@ -438,24 +438,30 @@ bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
   if (!VT.isSimple() || VT == MVT::Other)
     return false;
 
-  // TODO - CI+ supports unaligned memory accesses, but this requires driver
-  // support.
-
-  // XXX - The only mention I see of this in the ISA manual is for LDS direct
-  // reads the "byte address and must be dword aligned". Is it also true for the
-  // normal loads and stores?
-  if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS) {
+  if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
+      AddrSpace == AMDGPUAS::REGION_ADDRESS) {
     // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
     // aligned, 8 byte access in a single operation using ds_read2/write2_b32
     // with adjacent offsets.
     bool AlignedBy4 = (Align % 4 == 0);
     if (IsFast)
       *IsFast = AlignedBy4;
 
     return AlignedBy4;
   }
 
+  if (Subtarget->hasUnalignedBufferAccess()) {
+    // If we have an uniform constant load, it still requires using a slow
+    // buffer instruction if unaligned.
+    if (IsFast) {
+      *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS) ?
+        (Align % 4 == 0) : true;
+    }
+
+    return true;
+  }
+
   // Smaller than dword value must be aligned.
-  // FIXME: This should be allowed on CI+
   if (VT.bitsLT(MVT::i32))
     return false;

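In practical terms, the hook now answers "legal" for unaligned accesses outside the LDS/region address spaces whenever the subtarget has the feature, and only reports IsFast = false for constant-address-space accesses that are not dword aligned, since those still go through the slower buffer path rather than SMRD. A minimal IR sketch of the global case, modeled on the unaligned-load-store.ll updates later in this commit (the kernel name is illustrative):

; Compiled with -mattr=+unaligned-buffer-access the align 1 access below is
; kept whole (buffer_load_dword / buffer_store_dword); without the feature the
; legalizer expands it into four byte loads and four byte stores.
define void @global_align1_i32(i32 addrspace(1)* %p, i32 addrspace(1)* %r) {
  %v = load i32, i32 addrspace(1)* %p, align 1
  store i32 %v, i32 addrspace(1)* %r, align 1
  ret void
}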

@@ -183,8 +183,10 @@ def mubuf_load_atomic : PatFrag <(ops node:$ptr), (atomic_load node:$ptr), [{
 
 def smrd_load : PatFrag <(ops node:$ptr), (load node:$ptr), [{
-  return isConstantLoad(cast<LoadSDNode>(N), -1) &&
-         static_cast<const SITargetLowering *>(getTargetLowering())->isMemOpUniform(N);
+  auto Ld = cast<LoadSDNode>(N);
+  return Ld->getAlignment() >= 4 &&
+         isConstantLoad(Ld, -1) &&
+         static_cast<const SITargetLowering *>(getTargetLowering())->isMemOpUniform(N);
 }]>;
 
 //===----------------------------------------------------------------------===//

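The smrd_load change gates scalar-memory (SMRD) selection on a known alignment of at least 4 bytes, so a uniform constant-address-space load that is not dword aligned no longer matches this fragment and is expected to be handled on the ordinary buffer path instead. A small sketch of the distinction, modeled on the constant_* tests added later in this commit (function names are illustrative):

; align 4: still eligible for smrd_load (s_load_dword).
define void @constant_align4_i32(i32 addrspace(2)* %p, i32 addrspace(1)* %r) {
  %v = load i32, i32 addrspace(2)* %p, align 4
  store i32 %v, i32 addrspace(1)* %r, align 4
  ret void
}

; align 2: no longer matched by smrd_load, even though it is a uniform
; constant load.
define void @constant_align2_i32(i32 addrspace(2)* %p, i32 addrspace(1)* %r) {
  %v = load i32, i32 addrspace(2)* %p, align 2
  store i32 %v, i32 addrspace(1)* %r, align 4
  ret void
}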

@@ -1,9 +1,9 @@
 ; RUN: llc -show-mc-encoding -mattr=+promote-alloca -verify-machineinstrs -march=amdgcn < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC
-; RUN: llc -show-mc-encoding -mattr=+promote-alloca -verify-machineinstrs -mtriple=amdgcn--amdhsa -mcpu=kaveri < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC -check-prefix=HSA-PROMOTE
+; RUN: llc -show-mc-encoding -mattr=+promote-alloca -verify-machineinstrs -mtriple=amdgcn--amdhsa -mcpu=kaveri -mattr=-unaligned-buffer-access < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC -check-prefix=HSA-PROMOTE
 ; RUN: llc -show-mc-encoding -mattr=-promote-alloca -verify-machineinstrs -march=amdgcn < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC
-; RUN: llc -show-mc-encoding -mattr=-promote-alloca -verify-machineinstrs -mtriple=amdgcn-amdhsa -mcpu=kaveri < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC -check-prefix=HSA-ALLOCA
-; RUN: llc -show-mc-encoding -mattr=+promote-alloca -verify-machineinstrs -mtriple=amdgcn-amdhsa -march=amdgcn -mcpu=tonga < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC
-; RUN: llc -show-mc-encoding -mattr=-promote-alloca -verify-machineinstrs -mtriple=amdgcn-amdhsa -march=amdgcn -mcpu=tonga < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC
+; RUN: llc -show-mc-encoding -mattr=-promote-alloca -verify-machineinstrs -mtriple=amdgcn-amdhsa -mcpu=kaveri -mattr=-unaligned-buffer-access < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC -check-prefix=HSA-ALLOCA
+; RUN: llc -show-mc-encoding -mattr=+promote-alloca -verify-machineinstrs -mtriple=amdgcn-amdhsa -march=amdgcn -mcpu=tonga -mattr=-unaligned-buffer-access < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC
+; RUN: llc -show-mc-encoding -mattr=-promote-alloca -verify-machineinstrs -mtriple=amdgcn-amdhsa -march=amdgcn -mcpu=tonga -mattr=-unaligned-buffer-access < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC
 ; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -mcpu=kaveri -amdgpu-promote-alloca < %s | FileCheck -check-prefix=HSAOPT -check-prefix=OPT %s
 ; RUN: opt -S -mtriple=amdgcn-unknown-unknown -mcpu=kaveri -amdgpu-promote-alloca < %s | FileCheck -check-prefix=NOHSAOPT -check-prefix=OPT %s


@ -1,30 +1,28 @@
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=FUNC %s ; RUN: llc -march=amdgcn -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=ALIGNED %s
; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-HSA -check-prefix=FUNC %s ; RUN: llc -march=amdgcn -mcpu=bonaire -mattr=+unaligned-buffer-access -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=UNALIGNED %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=FUNC %s ; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=ALIGNED %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}local_unaligned_load_store_i16: ; SI-LABEL: {{^}}local_unaligned_load_store_i16:
; GCN: ds_read_u8 ; SI: ds_read_u8
; GCN: ds_read_u8 ; SI: ds_read_u8
; GCN: ds_write_b8 ; SI: ds_write_b8
; GCN: ds_write_b8 ; SI: ds_write_b8
; GCN: s_endpgm ; SI: s_endpgm
define void @local_unaligned_load_store_i16(i16 addrspace(3)* %p, i16 addrspace(3)* %r) #0 { define void @local_unaligned_load_store_i16(i16 addrspace(3)* %p, i16 addrspace(3)* %r) #0 {
%v = load i16, i16 addrspace(3)* %p, align 1 %v = load i16, i16 addrspace(3)* %p, align 1
store i16 %v, i16 addrspace(3)* %r, align 1 store i16 %v, i16 addrspace(3)* %r, align 1
ret void ret void
} }
; FUNC-LABEL: {{^}}global_unaligned_load_store_i16: ; SI-LABEL: {{^}}global_unaligned_load_store_i16:
; GCN-NOHSA: buffer_load_ubyte ; ALIGNED: buffer_load_ubyte
; GCN-NOHSA: buffer_load_ubyte ; ALIGNED: buffer_load_ubyte
; GCN-NOHSA: buffer_store_byte ; ALIGNED: buffer_store_byte
; GCN-NOHSA: buffer_store_byte ; ALIGNED: buffer_store_byte
; GCN-HSA: flat_load_ubyte ; UNALIGNED: buffer_load_ushort
; GCN-HSA: flat_load_ubyte ; UNALIGNED: buffer_store_short
; GCN-HSA: flat_store_byte ; SI: s_endpgm
; GCN-HSA: flat_store_byte
define void @global_unaligned_load_store_i16(i16 addrspace(1)* %p, i16 addrspace(1)* %r) #0 { define void @global_unaligned_load_store_i16(i16 addrspace(1)* %p, i16 addrspace(1)* %r) #0 {
%v = load i16, i16 addrspace(1)* %p, align 1 %v = load i16, i16 addrspace(1)* %p, align 1
store i16 %v, i16 addrspace(1)* %r, align 1 store i16 %v, i16 addrspace(1)* %r, align 1
@@ -50,40 +48,32 @@ define void @local_unaligned_load_store_i32(i32 addrspace(3)* %p, i32 addrspace(
   ret void
 }
 
-; FUNC-LABEL: {{^}}global_unaligned_load_store_i32:
-; GCN-NOHSA: buffer_load_ubyte
-; GCN-NOHSA: buffer_load_ubyte
-; GCN-NOHSA: buffer_load_ubyte
-; GCN-NOHSA: buffer_load_ubyte
-; GCN-NOHSA: buffer_store_byte
-; GCN-NOHSA: buffer_store_byte
-; GCN-NOHSA: buffer_store_byte
-; GCN-NOHSA: buffer_store_byte
-; GCN-HSA: flat_load_ubyte
-; GCN-HSA: flat_load_ubyte
-; GCN-HSA: flat_load_ubyte
-; GCN-HSA: flat_load_ubyte
-; GCN-HSA: flat_store_byte
-; GCN-HSA: flat_store_byte
-; GCN-HSA: flat_store_byte
-; GCN-HSA: flat_store_byte
+; SI-LABEL: {{^}}global_unaligned_load_store_i32:
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+
+; UNALIGNED: buffer_load_dword
+; UNALIGNED: buffer_store_dword
 define void @global_unaligned_load_store_i32(i32 addrspace(1)* %p, i32 addrspace(1)* %r) #0 {
   %v = load i32, i32 addrspace(1)* %p, align 1
   store i32 %v, i32 addrspace(1)* %r, align 1
   ret void
 }
 
-; FUNC-LABEL: {{^}}global_align2_load_store_i32:
-; GCN-NOHSA: buffer_load_ushort
-; GCN-NOHSA: buffer_load_ushort
-; GCN-NOHSA: buffer_store_short
-; GCN-NOHSA: buffer_store_short
-; GCN-HSA: flat_load_ushort
-; GCN-HSA: flat_load_ushort
-; GCN-HSA: flat_store_short
-; GCN-HSA: flat_store_short
+; SI-LABEL: {{^}}global_align2_load_store_i32:
+; ALIGNED: buffer_load_ushort
+; ALIGNED: buffer_load_ushort
+; ALIGNED: buffer_store_short
+; ALIGNED: buffer_store_short
+
+; UNALIGNED: buffer_load_dword
+; UNALIGNED: buffer_store_dword
 define void @global_align2_load_store_i32(i32 addrspace(1)* %p, i32 addrspace(1)* %r) #0 {
   %v = load i32, i32 addrspace(1)* %p, align 2
   store i32 %v, i32 addrspace(1)* %r, align 2
@@ -142,7 +132,7 @@ define void @local_align2_load_store_i32(i32 addrspace(3)* %p, i32 addrspace(3)*
 ; SI-NOT: v_lshl
 ; SI: ds_write_b8
 ; SI: s_endpgm
-define void @local_unaligned_load_store_i64(i64 addrspace(3)* %p, i64 addrspace(3)* %r) {
+define void @local_unaligned_load_store_i64(i64 addrspace(3)* %p, i64 addrspace(3)* %r) #0 {
   %v = load i64, i64 addrspace(3)* %p, align 1
   store i64 %v, i64 addrspace(3)* %r, align 1
   ret void
@@ -189,61 +179,67 @@ define void @local_unaligned_load_store_i64(i64 addrspace(3)* %p, i64 addrspace(
 ; SI-NOT: v_lshl
 ; SI: ds_write_b8
 ; SI: s_endpgm
-define void @local_unaligned_load_store_v2i32(<2 x i32> addrspace(3)* %p, <2 x i32> addrspace(3)* %r) {
+define void @local_unaligned_load_store_v2i32(<2 x i32> addrspace(3)* %p, <2 x i32> addrspace(3)* %r) #0 {
   %v = load <2 x i32>, <2 x i32> addrspace(3)* %p, align 1
   store <2 x i32> %v, <2 x i32> addrspace(3)* %r, align 1
   ret void
 }
 
 ; SI-LABEL: {{^}}global_align2_load_store_i64:
-; SI: buffer_load_ushort
-; SI: buffer_load_ushort
-; SI-NOT: v_or_
-; SI-NOT: v_lshl
-; SI: buffer_load_ushort
-; SI-NOT: v_or_
-; SI-NOT: v_lshl
-; SI: buffer_load_ushort
-; SI-NOT: v_or_
-; SI-NOT: v_lshl
-; SI: buffer_store_short
-; SI: buffer_store_short
-; SI: buffer_store_short
-; SI: buffer_store_short
-define void @global_align2_load_store_i64(i64 addrspace(1)* %p, i64 addrspace(1)* %r) {
+; ALIGNED: buffer_load_ushort
+; ALIGNED: buffer_load_ushort
+; ALIGNED-NOT: v_or_
+; ALIGNED-NOT: v_lshl
+; ALIGNED: buffer_load_ushort
+; ALIGNED-NOT: v_or_
+; ALIGNED-NOT: v_lshl
+; ALIGNED: buffer_load_ushort
+; ALIGNED-NOT: v_or_
+; ALIGNED-NOT: v_lshl
+; ALIGNED: buffer_store_short
+; ALIGNED: buffer_store_short
+; ALIGNED: buffer_store_short
+; ALIGNED: buffer_store_short
+
+; UNALIGNED: buffer_load_dwordx2
+; UNALIGNED: buffer_store_dwordx2
+define void @global_align2_load_store_i64(i64 addrspace(1)* %p, i64 addrspace(1)* %r) #0 {
   %v = load i64, i64 addrspace(1)* %p, align 2
   store i64 %v, i64 addrspace(1)* %r, align 2
   ret void
 }
 
 ; SI-LABEL: {{^}}unaligned_load_store_i64_global:
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI-NOT: v_or_
-; SI-NOT: v_lshl
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-define void @unaligned_load_store_i64_global(i64 addrspace(1)* %p, i64 addrspace(1)* %r) {
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED-NOT: v_or_
+; ALIGNED-NOT: v_lshl
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+
+; UNALIGNED: buffer_load_dwordx2
+; UNALIGNED: buffer_store_dwordx2
+define void @unaligned_load_store_i64_global(i64 addrspace(1)* %p, i64 addrspace(1)* %r) #0 {
   %v = load i64, i64 addrspace(1)* %p, align 1
   store i64 %v, i64 addrspace(1)* %r, align 1
   ret void
@@ -297,40 +293,43 @@ define void @local_unaligned_load_store_v4i32(<4 x i32> addrspace(3)* %p, <4 x i
 }
 
 ; SI-LABEL: {{^}}global_unaligned_load_store_v4i32
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-define void @global_unaligned_load_store_v4i32(<4 x i32> addrspace(1)* %p, <4 x i32> addrspace(1)* %r) nounwind {
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+
+; UNALIGNED: buffer_load_dwordx4
+; UNALIGNED: buffer_store_dwordx4
+define void @global_unaligned_load_store_v4i32(<4 x i32> addrspace(1)* %p, <4 x i32> addrspace(1)* %r) #0 {
   %v = load <4 x i32>, <4 x i32> addrspace(1)* %p, align 1
   store <4 x i32> %v, <4 x i32> addrspace(1)* %r, align 1
   ret void
@@ -410,50 +409,146 @@ define void @local_store_i64_align_4_with_split_offset(i64 addrspace(3)* %out) #
   ret void
 }
 
-; FUNC-LABEL: {{^}}constant_load_unaligned_i16:
-; GCN-NOHSA: buffer_load_ushort
-; GCN-HSA: flat_load_ushort
-
-; EG: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
-define void @constant_load_unaligned_i16(i32 addrspace(1)* %out, i16 addrspace(2)* %in) {
-entry:
-  %tmp0 = getelementptr i16, i16 addrspace(2)* %in, i32 1
-  %tmp1 = load i16, i16 addrspace(2)* %tmp0
-  %tmp2 = zext i16 %tmp1 to i32
-  store i32 %tmp2, i32 addrspace(1)* %out
+; SI-LABEL: {{^}}constant_unaligned_load_i32:
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+
+; UNALIGNED: s_load_dword
+
+; SI: buffer_store_dword
+define void @constant_unaligned_load_i32(i32 addrspace(2)* %p, i32 addrspace(1)* %r) #0 {
+  %v = load i32, i32 addrspace(2)* %p, align 1
+  store i32 %v, i32 addrspace(1)* %r, align 4
   ret void
 }
 
-; FUNC-LABEL: {{^}}constant_load_unaligned_i32:
-; GCN-NOHSA: buffer_load_ubyte
-; GCN-NOHSA: buffer_load_ubyte
-; GCN-NOHSA: buffer_load_ubyte
-; GCN-NOHSA: buffer_load_ubyte
-; GCN-HSA: flat_load_ubyte
-; GCN-HSA: flat_load_ubyte
-; GCN-HSA: flat_load_ubyte
-; GCN-HSA: flat_load_ubyte
-define void @constant_load_unaligned_i32(i32 addrspace(1)* %out, i32 addrspace(2)* %in) {
-entry:
-  %tmp0 = load i32, i32 addrspace(2)* %in, align 1
-  store i32 %tmp0, i32 addrspace(1)* %out
+; SI-LABEL: {{^}}constant_align2_load_i32:
+; ALIGNED: buffer_load_ushort
+; ALIGNED: buffer_load_ushort
+
+; UNALIGNED: s_load_dword
+; UNALIGNED: buffer_store_dword
+define void @constant_align2_load_i32(i32 addrspace(2)* %p, i32 addrspace(1)* %r) #0 {
+  %v = load i32, i32 addrspace(2)* %p, align 2
+  store i32 %v, i32 addrspace(1)* %r, align 4
   ret void
 }
 
-; FUNC-LABEL: {{^}}constant_load_unaligned_f32:
-; GCN-NOHSA: buffer_load_ubyte
-; GCN-NOHSA: buffer_load_ubyte
-; GCN-NOHSA: buffer_load_ubyte
-; GCN-NOHSA: buffer_load_ubyte
-; GCN-HSA: flat_load_ubyte
-; GCN-HSA: flat_load_ubyte
-; GCN-HSA: flat_load_ubyte
-; GCN-HSA: flat_load_ubyte
-define void @constant_load_unaligned_f32(float addrspace(1)* %out, float addrspace(2)* %in) {
-  %tmp1 = load float, float addrspace(2)* %in, align 1
-  store float %tmp1, float addrspace(1)* %out
+; SI-LABEL: {{^}}constant_align2_load_i64:
+; ALIGNED: buffer_load_ushort
+; ALIGNED: buffer_load_ushort
+; ALIGNED: buffer_load_ushort
+; ALIGNED: buffer_load_ushort
+
+; UNALIGNED: s_load_dwordx2
+; UNALIGNED: buffer_store_dwordx2
+define void @constant_align2_load_i64(i64 addrspace(2)* %p, i64 addrspace(1)* %r) #0 {
+  %v = load i64, i64 addrspace(2)* %p, align 2
+  store i64 %v, i64 addrspace(1)* %r, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}constant_align4_load_i64:
+; SI: s_load_dwordx2
+; SI: buffer_store_dwordx2
+define void @constant_align4_load_i64(i64 addrspace(2)* %p, i64 addrspace(1)* %r) #0 {
+  %v = load i64, i64 addrspace(2)* %p, align 4
+  store i64 %v, i64 addrspace(1)* %r, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}constant_align4_load_v4i32:
+; SI: s_load_dwordx4
+; SI: buffer_store_dwordx4
+define void @constant_align4_load_v4i32(<4 x i32> addrspace(2)* %p, <4 x i32> addrspace(1)* %r) #0 {
+  %v = load <4 x i32>, <4 x i32> addrspace(2)* %p, align 4
+  store <4 x i32> %v, <4 x i32> addrspace(1)* %r, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}constant_unaligned_load_v2i32:
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+
+; UNALIGNED: buffer_load_dwordx2
+
+; SI: buffer_store_dwordx2
+define void @constant_unaligned_load_v2i32(<2 x i32> addrspace(2)* %p, <2 x i32> addrspace(1)* %r) #0 {
+  %v = load <2 x i32>, <2 x i32> addrspace(2)* %p, align 1
+  store <2 x i32> %v, <2 x i32> addrspace(1)* %r, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}constant_unaligned_load_v4i32:
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+
+; UNALIGNED: buffer_load_dwordx4
+
+; SI: buffer_store_dwordx4
+define void @constant_unaligned_load_v4i32(<4 x i32> addrspace(2)* %p, <4 x i32> addrspace(1)* %r) #0 {
+  %v = load <4 x i32>, <4 x i32> addrspace(2)* %p, align 1
+  store <4 x i32> %v, <4 x i32> addrspace(1)* %r, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}constant_align4_load_i8:
+; SI: buffer_load_ubyte
+; SI: buffer_store_byte
+define void @constant_align4_load_i8(i8 addrspace(2)* %p, i8 addrspace(1)* %r) #0 {
+  %v = load i8, i8 addrspace(2)* %p, align 4
+  store i8 %v, i8 addrspace(1)* %r, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}constant_align2_load_i8:
+; SI: buffer_load_ubyte
+; SI: buffer_store_byte
+define void @constant_align2_load_i8(i8 addrspace(2)* %p, i8 addrspace(1)* %r) #0 {
+  %v = load i8, i8 addrspace(2)* %p, align 2
+  store i8 %v, i8 addrspace(1)* %r, align 2
+  ret void
+}
+
+; SI-LABEL: {{^}}constant_align4_merge_load_2_i32:
+; SI: s_load_dwordx2 s{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x0{{$}}
+; SI-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], s[[LO]]
+; SI-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], s[[HI]]
+; SI: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[VHI]]{{\]}}
+define void @constant_align4_merge_load_2_i32(i32 addrspace(2)* %p, i32 addrspace(1)* %r) #0 {
+  %gep0 = getelementptr i32, i32 addrspace(2)* %p, i64 1
+  %v0 = load i32, i32 addrspace(2)* %p, align 4
+  %v1 = load i32, i32 addrspace(2)* %gep0, align 4
+  %gep1 = getelementptr i32, i32 addrspace(1)* %r, i64 1
+  store i32 %v0, i32 addrspace(1)* %r, align 4
+  store i32 %v1, i32 addrspace(1)* %gep1, align 4
   ret void
 }