[IR] Enable opaque pointers by default

This enables opaque pointers by default in LLVM. The effect of this
change is twofold:

* If IR that contains *neither* explicit ptr nor %T* types is passed
  to tools, we will now use opaque pointer mode, unless
  -opaque-pointers=0 has been explicitly passed.
* Users of LLVM as a library will now default to opaque pointers.
  It is possible to opt out by calling setOpaquePointers(false) on
  the LLVMContext.

A cmake option to toggle this default will not be provided. Frontends
or other tools that want to (temporarily) keep using typed pointers
should disable opaque pointers via LLVMContext.
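
For example, a library user that needs to keep typed pointers for a
while longer could opt out roughly as follows (a minimal sketch, assuming
the LLVM 15-era C++ API in which LLVMContext::setOpaquePointers() is
still available):

    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"

    int main() {
      llvm::LLVMContext Ctx;
      // Opt out of the new opaque-pointer default; do this before creating
      // any pointer types in this context.
      Ctx.setOpaquePointers(false);
      llvm::Module M("example", Ctx);
      // ... build IR using typed pointers as before ...
      return 0;
    }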

Differential Revision: https://reviews.llvm.org/D126689
Nikita Popov 2022-05-31 11:58:35 +02:00
parent 6333e5dde9
commit 41d5033eb1
166 changed files with 1621 additions and 1657 deletions


@ -53,12 +53,12 @@ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16
target triple = "x86_64-unknown-linux-gnu"
declare void @f2()
declare i8* @f3()
declare ptr @f3()
define void @f1() {
call void @f2()
; Make sure that the backend can handle undefined references.
; Do an indirect call so that the undefined ref shows up in the combined index.
call void bitcast (i8*()* @f3 to void()*)()
call void @f3()
ret void
}


@ -1,29 +1,29 @@
// Create a sample address sanitizer bitcode library.
// RUN: %clang_cc1 -no-opaque-pointers -x ir -fcuda-is-device -triple amdgcn-amd-amdhsa -emit-llvm-bc \
// RUN: %clang_cc1 -x ir -fcuda-is-device -triple amdgcn-amd-amdhsa -emit-llvm-bc \
// RUN: -disable-llvm-passes -o %t.asanrtl.bc %S/Inputs/amdgpu-asanrtl.ll
// Check sanitizer runtime library functions survive
// optimizations without being removed or parameters altered.
// RUN: %clang_cc1 -no-opaque-pointers %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa \
// RUN: %clang_cc1 %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa \
// RUN: -fcuda-is-device -target-cpu gfx906 -fsanitize=address \
// RUN: -mlink-bitcode-file %t.asanrtl.bc -x hip \
// RUN: | FileCheck -check-prefixes=ASAN %s
// RUN: %clang_cc1 -no-opaque-pointers %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa \
// RUN: %clang_cc1 %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa \
// RUN: -fcuda-is-device -target-cpu gfx906 -fsanitize=address \
// RUN: -O3 -mlink-bitcode-file %t.asanrtl.bc -x hip \
// RUN: | FileCheck -check-prefixes=ASAN %s
// RUN: %clang_cc1 -no-opaque-pointers %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa \
// RUN: %clang_cc1 %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa \
// RUN: -fcuda-is-device -target-cpu gfx906 -x hip \
// RUN: | FileCheck %s
// REQUIRES: amdgpu-registered-target
// ASAN-DAG: define weak void @__amdgpu_device_library_preserve_asan_functions()
// ASAN-DAG: @__amdgpu_device_library_preserve_asan_functions_ptr = weak addrspace(1) constant void ()* @__amdgpu_device_library_preserve_asan_functions
// ASAN-DAG: @__amdgpu_device_library_preserve_asan_functions_ptr = weak addrspace(1) constant ptr @__amdgpu_device_library_preserve_asan_functions
// ASAN-DAG: @llvm.compiler.used = {{.*}}@__amdgpu_device_library_preserve_asan_functions_ptr
// ASAN-DAG: define weak void @__asan_report_load1(i64 %{{.*}})


@ -25,9 +25,9 @@
// ELF-WARNING: is not an ELF image, so notes cannot be added to it.
// CHECK-IR: target triple = "x86_64-pc-linux-gnu"
// CHECK-IR-DAG: [[ENTTY:%.+]] = type { i8*, i8*, i{{32|64}}, i32, i32 }
// CHECK-IR-DAG: [[IMAGETY:%.+]] = type { i8*, i8*, [[ENTTY]]*, [[ENTTY]]* }
// CHECK-IR-DAG: [[DESCTY:%.+]] = type { i32, [[IMAGETY]]*, [[ENTTY]]*, [[ENTTY]]* }
// CHECK-IR-DAG: [[ENTTY:%.+]] = type { ptr, ptr, i{{32|64}}, i32, i32 }
// CHECK-IR-DAG: [[IMAGETY:%.+]] = type { ptr, ptr, ptr, ptr }
// CHECK-IR-DAG: [[DESCTY:%.+]] = type { i32, ptr, ptr, ptr }
// CHECK-IR: [[ENTBEGIN:@.+]] = external hidden constant [[ENTTY]]
// CHECK-IR: [[ENTEND:@.+]] = external hidden constant [[ENTTY]]
@ -36,24 +36,24 @@
// CHECK-IR: [[BIN:@.+]] = internal unnamed_addr constant [[BINTY:\[[0-9]+ x i8\]]] c"Content of device file{{.+}}"
// CHECK-IR: [[IMAGES:@.+]] = internal unnamed_addr constant [1 x [[IMAGETY]]] [{{.+}} { i8* getelementptr inbounds ([[BINTY]], [[BINTY]]* [[BIN]], i64 0, i64 0), i8* getelementptr inbounds ([[BINTY]], [[BINTY]]* [[BIN]], i64 1, i64 0), [[ENTTY]]* [[ENTBEGIN]], [[ENTTY]]* [[ENTEND]] }]
// CHECK-IR: [[IMAGES:@.+]] = internal unnamed_addr constant [1 x [[IMAGETY]]] [{{.+}} { ptr [[BIN]], ptr getelementptr inbounds ([[BINTY]], ptr [[BIN]], i64 1, i64 0), ptr [[ENTBEGIN]], ptr [[ENTEND]] }]
// CHECK-IR: [[DESC:@.+]] = internal constant [[DESCTY]] { i32 1, [[IMAGETY]]* getelementptr inbounds ([1 x [[IMAGETY]]], [1 x [[IMAGETY]]]* [[IMAGES]], i64 0, i64 0), [[ENTTY]]* [[ENTBEGIN]], [[ENTTY]]* [[ENTEND]] }
// CHECK-IR: [[DESC:@.+]] = internal constant [[DESCTY]] { i32 1, ptr [[IMAGES]], ptr [[ENTBEGIN]], ptr [[ENTEND]] }
// CHECK-IR: @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 1, void ()* [[REGFN:@.+]], i8* null }]
// CHECK-IR: @llvm.global_dtors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 1, void ()* [[UNREGFN:@.+]], i8* null }]
// CHECK-IR: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr [[REGFN:@.+]], ptr null }]
// CHECK-IR: @llvm.global_dtors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr [[UNREGFN:@.+]], ptr null }]
// CHECK-IR: define internal void [[REGFN]]()
// CHECK-IR: call void @__tgt_register_lib([[DESCTY]]* [[DESC]])
// CHECK-IR: call void @__tgt_register_lib(ptr [[DESC]])
// CHECK-IR: ret void
// CHECK-IR: declare void @__tgt_register_lib([[DESCTY]]*)
// CHECK-IR: declare void @__tgt_register_lib(ptr)
// CHECK-IR: define internal void [[UNREGFN]]()
// CHECK-IR: call void @__tgt_unregister_lib([[DESCTY]]* [[DESC]])
// CHECK-IR: call void @__tgt_unregister_lib(ptr [[DESC]])
// CHECK-IR: ret void
// CHECK-IR: declare void @__tgt_unregister_lib([[DESCTY]]*)
// CHECK-IR: declare void @__tgt_unregister_lib(ptr)
// Check that clang-offload-wrapper adds LLVMOMPOFFLOAD notes
// into the ELF offload images:


@ -12,20 +12,20 @@
// OPENMP-NEXT: @__stop_omp_offloading_entries = external hidden constant %__tgt_offload_entry
// OPENMP-NEXT: @__dummy.omp_offloading.entry = hidden constant [0 x %__tgt_offload_entry] zeroinitializer, section "omp_offloading_entries"
// OPENMP-NEXT: @.omp_offloading.device_image = internal unnamed_addr constant [0 x i8] zeroinitializer
// OPENMP-NEXT: @.omp_offloading.device_images = internal unnamed_addr constant [1 x %__tgt_device_image] [%__tgt_device_image { i8* getelementptr inbounds ([0 x i8], [0 x i8]* @.omp_offloading.device_image, i64 0, i64 0), i8* getelementptr inbounds ([0 x i8], [0 x i8]* @.omp_offloading.device_image, i64 0, i64 0), %__tgt_offload_entry* @__start_omp_offloading_entries, %__tgt_offload_entry* @__stop_omp_offloading_entries }]
// OPENMP-NEXT: @.omp_offloading.descriptor = internal constant %__tgt_bin_desc { i32 1, %__tgt_device_image* getelementptr inbounds ([1 x %__tgt_device_image], [1 x %__tgt_device_image]* @.omp_offloading.device_images, i64 0, i64 0), %__tgt_offload_entry* @__start_omp_offloading_entries, %__tgt_offload_entry* @__stop_omp_offloading_entries }
// OPENMP-NEXT: @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 1, void ()* @.omp_offloading.descriptor_reg, i8* null }]
// OPENMP-NEXT: @llvm.global_dtors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 1, void ()* @.omp_offloading.descriptor_unreg, i8* null }]
// OPENMP-NEXT: @.omp_offloading.device_images = internal unnamed_addr constant [1 x %__tgt_device_image] [%__tgt_device_image { ptr @.omp_offloading.device_image, ptr @.omp_offloading.device_image, ptr @__start_omp_offloading_entries, ptr @__stop_omp_offloading_entries }]
// OPENMP-NEXT: @.omp_offloading.descriptor = internal constant %__tgt_bin_desc { i32 1, ptr @.omp_offloading.device_images, ptr @__start_omp_offloading_entries, ptr @__stop_omp_offloading_entries }
// OPENMP-NEXT: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @.omp_offloading.descriptor_reg, ptr null }]
// OPENMP-NEXT: @llvm.global_dtors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @.omp_offloading.descriptor_unreg, ptr null }]
// OPENMP: define internal void @.omp_offloading.descriptor_reg() section ".text.startup" {
// OPENMP-NEXT: entry:
// OPENMP-NEXT: call void @__tgt_register_lib(%__tgt_bin_desc* @.omp_offloading.descriptor)
// OPENMP-NEXT: call void @__tgt_register_lib(ptr @.omp_offloading.descriptor)
// OPENMP-NEXT: ret void
// OPENMP-NEXT: }
// OPENMP: define internal void @.omp_offloading.descriptor_unreg() section ".text.startup" {
// OPENMP-NEXT: entry:
// OPENMP-NEXT: call void @__tgt_unregister_lib(%__tgt_bin_desc* @.omp_offloading.descriptor)
// OPENMP-NEXT: call void @__tgt_unregister_lib(ptr @.omp_offloading.descriptor)
// OPENMP-NEXT: ret void
// OPENMP-NEXT: }
@ -36,56 +36,56 @@
// RUN: -linker-path /usr/bin/ld -- %t.o -o a.out 2>&1 | FileCheck %s --check-prefix=CUDA
// CUDA: @.fatbin_image = internal constant [0 x i8] zeroinitializer, section ".nv_fatbin"
// CUDA-NEXT: @.fatbin_wrapper = internal constant %fatbin_wrapper { i32 1180844977, i32 1, i8* getelementptr inbounds ([0 x i8], [0 x i8]* @.fatbin_image, i32 0, i32 0), i8* null }, section ".nvFatBinSegment", align 8
// CUDA-NEXT: @.fatbin_wrapper = internal constant %fatbin_wrapper { i32 1180844977, i32 1, ptr @.fatbin_image, ptr null }, section ".nvFatBinSegment", align 8
// CUDA-NEXT: @__dummy.cuda_offloading.entry = hidden constant [0 x %__tgt_offload_entry] zeroinitializer, section "cuda_offloading_entries"
// CUDA-NEXT: @.cuda.binary_handle = internal global i8** null
// CUDA-NEXT: @.cuda.binary_handle = internal global ptr null
// CUDA-NEXT: @__start_cuda_offloading_entries = external hidden constant [0 x %__tgt_offload_entry]
// CUDA-NEXT: @__stop_cuda_offloading_entries = external hidden constant [0 x %__tgt_offload_entry]
// CUDA-NEXT: @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 1, void ()* @.cuda.fatbin_reg, i8* null }]
// CUDA-NEXT: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @.cuda.fatbin_reg, ptr null }]
// CUDA: define internal void @.cuda.fatbin_reg() section ".text.startup" {
// CUDA-NEXT: entry:
// CUDA-NEXT: %0 = call i8** @__cudaRegisterFatBinary(i8* bitcast (%fatbin_wrapper* @.fatbin_wrapper to i8*))
// CUDA-NEXT: store i8** %0, i8*** @.cuda.binary_handle, align 8
// CUDA-NEXT: call void @.cuda.globals_reg(i8** %0)
// CUDA-NEXT: call void @__cudaRegisterFatBinaryEnd(i8** %0)
// CUDA-NEXT: %1 = call i32 @atexit(void ()* @.cuda.fatbin_unreg)
// CUDA-NEXT: %0 = call ptr @__cudaRegisterFatBinary(ptr @.fatbin_wrapper)
// CUDA-NEXT: store ptr %0, ptr @.cuda.binary_handle, align 8
// CUDA-NEXT: call void @.cuda.globals_reg(ptr %0)
// CUDA-NEXT: call void @__cudaRegisterFatBinaryEnd(ptr %0)
// CUDA-NEXT: %1 = call i32 @atexit(ptr @.cuda.fatbin_unreg)
// CUDA-NEXT: ret void
// CUDA-NEXT: }
// CUDA: define internal void @.cuda.fatbin_unreg() section ".text.startup" {
// CUDA-NEXT: entry:
// CUDA-NEXT: %0 = load i8**, i8*** @.cuda.binary_handle, align 8
// CUDA-NEXT: call void @__cudaUnregisterFatBinary(i8** %0)
// CUDA-NEXT: %0 = load ptr, ptr @.cuda.binary_handle, align 8
// CUDA-NEXT: call void @__cudaUnregisterFatBinary(ptr %0)
// CUDA-NEXT: ret void
// CUDA-NEXT: }
// CUDA: define internal void @.cuda.globals_reg(i8** %0) section ".text.startup" {
// CUDA: define internal void @.cuda.globals_reg(ptr %0) section ".text.startup" {
// CUDA-NEXT: entry:
// CUDA-NEXT: br i1 icmp ne ([0 x %__tgt_offload_entry]* @__start_cuda_offloading_entries, [0 x %__tgt_offload_entry]* @__stop_cuda_offloading_entries), label %while.entry, label %while.end
// CUDA-NEXT: br i1 icmp ne (ptr @__start_cuda_offloading_entries, ptr @__stop_cuda_offloading_entries), label %while.entry, label %while.end
// CUDA: while.entry:
// CUDA-NEXT: %entry1 = phi %__tgt_offload_entry* [ getelementptr inbounds ([0 x %__tgt_offload_entry], [0 x %__tgt_offload_entry]* @__start_cuda_offloading_entries, i64 0, i64 0), %entry ], [ %7, %if.end ]
// CUDA-NEXT: %1 = getelementptr inbounds %__tgt_offload_entry, %__tgt_offload_entry* %entry1, i64 0, i32 0
// CUDA-NEXT: %addr = load i8*, i8** %1, align 8
// CUDA-NEXT: %2 = getelementptr inbounds %__tgt_offload_entry, %__tgt_offload_entry* %entry1, i64 0, i32 1
// CUDA-NEXT: %name = load i8*, i8** %2, align 8
// CUDA-NEXT: %3 = getelementptr inbounds %__tgt_offload_entry, %__tgt_offload_entry* %entry1, i64 0, i32 2
// CUDA-NEXT: %size = load i64, i64* %3, align 4
// CUDA-NEXT: %entry1 = phi ptr [ @__start_cuda_offloading_entries, %entry ], [ %7, %if.end ]
// CUDA-NEXT: %1 = getelementptr inbounds %__tgt_offload_entry, ptr %entry1, i64 0, i32 0
// CUDA-NEXT: %addr = load ptr, ptr %1, align 8
// CUDA-NEXT: %2 = getelementptr inbounds %__tgt_offload_entry, ptr %entry1, i64 0, i32 1
// CUDA-NEXT: %name = load ptr, ptr %2, align 8
// CUDA-NEXT: %3 = getelementptr inbounds %__tgt_offload_entry, ptr %entry1, i64 0, i32 2
// CUDA-NEXT: %size = load i64, ptr %3, align 4
// CUDA-NEXT: %4 = icmp eq i64 %size, 0
// CUDA-NEXT: br i1 %4, label %if.then, label %if.else
// CUDA: if.then:
// CUDA-NEXT: %5 = call i32 @__cudaRegisterFunction(i8** %0, i8* %addr, i8* %name, i8* %name, i32 -1, i8* null, i8* null, i8* null, i8* null, i32* null)
// CUDA-NEXT: %5 = call i32 @__cudaRegisterFunction(ptr %0, ptr %addr, ptr %name, ptr %name, i32 -1, ptr null, ptr null, ptr null, ptr null, ptr null)
// CUDA-NEXT: br label %if.end
// CUDA: if.else:
// CUDA-NEXT: %6 = call i32 @__cudaRegisterVar(i8** %0, i8* %addr, i8* %name, i8* %name, i32 0, i64 %size, i32 0, i32 0)
// CUDA-NEXT: %6 = call i32 @__cudaRegisterVar(ptr %0, ptr %addr, ptr %name, ptr %name, i32 0, i64 %size, i32 0, i32 0)
// CUDA-NEXT: br label %if.end
// CUDA: if.end:
// CUDA-NEXT: %7 = getelementptr inbounds %__tgt_offload_entry, %__tgt_offload_entry* %entry1, i64 1
// CUDA-NEXT: %8 = icmp eq %__tgt_offload_entry* %7, getelementptr inbounds ([0 x %__tgt_offload_entry], [0 x %__tgt_offload_entry]* @__stop_cuda_offloading_entries, i64 0, i64 0)
// CUDA-NEXT: %7 = getelementptr inbounds %__tgt_offload_entry, ptr %entry1, i64 1
// CUDA-NEXT: %8 = icmp eq ptr %7, @__stop_cuda_offloading_entries
// CUDA-NEXT: br i1 %8, label %while.end, label %while.entry
// CUDA: while.end:


@ -195,7 +195,7 @@ Transition State
================
As of April 2022 both LLVM and Clang have complete support for opaque pointers,
and opaque pointers are enabled by default in Clang.
and opaque pointers are enabled by default in LLVM and Clang.
For users of the clang driver interface, it is possible to temporarily restore
the old default using the ``-DCLANG_ENABLE_OPAQUE_POINTERS=OFF`` cmake option,
@ -208,8 +208,13 @@ the cc1 interface.
Usage for LTO can be disabled by passing ``-Wl,-plugin-opt=no-opaque-pointers``
to the clang driver.
For users of LLVM as a library, opaque pointers can be disabled by calling
``setOpaquePointers(false)`` on the ``LLVMContext``.
For users of LLVM tools like opt, opaque pointers can be disabled by passing
``-opaque-pointers=0``.
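
For example, a typical invocation might look like this (a hypothetical
command line, shown here only for illustration)::

  opt -opaque-pointers=0 -passes=instcombine -S input.ll -o output.ll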
The next steps for the opaque pointer migration are:
* Migrate Clang/LLVM tests to use opaque pointers.
* Enable opaque pointers by default in LLVM.
* Remove support for typed pointers after the LLVM 15 branch has been created.


@ -36,7 +36,7 @@ using namespace llvm;
static cl::opt<bool>
OpaquePointersCL("opaque-pointers", cl::desc("Use opaque pointers"),
cl::init(false));
cl::init(true));
LLVMContextImpl::LLVMContextImpl(LLVMContext &C)
: DiagHandler(std::make_unique<DiagnosticHandler>()),


@ -8,7 +8,7 @@
; BARE: }
@B = external global i32
; ANNOT: @B = external global i32 ; [#uses=0 type=i32*]
; ANNOT: @B = external global i32 ; [#uses=0 type=ptr]
define <4 x i1> @foo(<4 x float> %a, <4 x float> %b) nounwind {
entry:


@ -1,28 +1,28 @@
; RUN: not opt -S < %s 2>&1 | FileCheck %s
; CHECK: Intrinsic has incorrect return type!
; CHECK-NEXT: float (double, <2 x double>)* @llvm.vector.reduce.fadd.f32.f64.v2f64
; CHECK-NEXT: ptr @llvm.vector.reduce.fadd.f32.f64.v2f64
define float @fadd_invalid_scalar_res(double %acc, <2 x double> %in) {
%res = call float @llvm.vector.reduce.fadd.f32.f64.v2f64(double %acc, <2 x double> %in)
ret float %res
}
; CHECK: Intrinsic has incorrect argument type!
; CHECK-NEXT: double (float, <2 x double>)* @llvm.vector.reduce.fadd.f64.f32.v2f64
; CHECK-NEXT: ptr @llvm.vector.reduce.fadd.f64.f32.v2f64
define double @fadd_invalid_scalar_start(float %acc, <2 x double> %in) {
%res = call double @llvm.vector.reduce.fadd.f64.f32.v2f64(float %acc, <2 x double> %in)
ret double %res
}
; CHECK: Intrinsic has incorrect return type!
; CHECK-NEXT: <2 x double> (double, <2 x double>)* @llvm.vector.reduce.fadd.v2f64.f64.v2f64
; CHECK-NEXT: ptr @llvm.vector.reduce.fadd.v2f64.f64.v2f64
define <2 x double> @fadd_invalid_vector_res(double %acc, <2 x double> %in) {
%res = call <2 x double> @llvm.vector.reduce.fadd.v2f64.f64.v2f64(double %acc, <2 x double> %in)
ret <2 x double> %res
}
; CHECK: Intrinsic has incorrect argument type!
; CHECK-NEXT: double (<2 x double>, <2 x double>)* @llvm.vector.reduce.fadd.f64.v2f64.v2f64
; CHECK-NEXT: ptr @llvm.vector.reduce.fadd.f64.v2f64.v2f64
define double @fadd_invalid_vector_start(<2 x double> %in, <2 x double> %acc) {
%res = call double @llvm.vector.reduce.fadd.f64.v2f64.v2f64(<2 x double> %acc, <2 x double> %in)
ret double %res


@ -1,7 +1,7 @@
; RUN: llvm-as < %s | llvm-c-test --module-list-globals | FileCheck %s
@foo = constant [7 x i8] c"foobar\00", align 1
;CHECK: GlobalDefinition: foo [7 x i8]*
;CHECK: GlobalDefinition: foo ptr
@bar = common global i32 0, align 4
;CHECK: GlobalDefinition: bar i32*
;CHECK: GlobalDefinition: bar ptr


@ -20,7 +20,7 @@ body: |
; CHECK: bb.1:
; CHECK: successors:
; CHECK: bb.2:
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF1]](p0) :: (load (s32) from `i32* undef`, align 8)
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF1]](p0) :: (load (s32) from `ptr undef`, align 8)
; CHECK: [[MUL:%[0-9]+]]:_(s32) = nsw G_MUL [[C]], [[LOAD]]
; CHECK: [[MUL1:%[0-9]+]]:_(s32) = nsw G_MUL [[MUL]], [[C1]]
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
@ -45,7 +45,7 @@ body: |
bb.3:
%2:_(s32) = G_LOAD %3(p0) :: (load (s32) from `i32* undef`, align 8)
%2:_(s32) = G_LOAD %3(p0) :: (load (s32) from `ptr undef`, align 8)
%5:_(s32) = nsw G_MUL %4, %2
%7:_(s32) = nsw G_MUL %5, %6
%9:_(s32) = nsw G_MUL %7, %8


@ -34,7 +34,7 @@ body: |
; CHECK-NEXT: [[FCVTHSr:%[0-9]+]]:fpr16 = nofpexcept FCVTHSr [[COPY]]
; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:fpr32 = SUBREG_TO_REG 0, [[FCVTHSr]], %subreg.hsub
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32all = COPY [[SUBREG_TO_REG]]
; CHECK-NEXT: STRHHui [[PHI]], [[DEF1]], 0 :: (store (s16) into `half* undef`)
; CHECK-NEXT: STRHHui [[PHI]], [[DEF1]], 0 :: (store (s16) into `ptr undef`)
; CHECK-NEXT: B %bb.2
bb.0:
successors: %bb.1(0x80000000)
@ -55,7 +55,7 @@ body: |
%3:gpr(s16) = G_PHI %1(s16), %bb.1, %5(s16), %bb.2
%5:fpr(s16) = G_FPTRUNC %8(s32)
G_STORE %3(s16), %4(p0) :: (store (s16) into `half* undef`)
G_STORE %3(s16), %4(p0) :: (store (s16) into `ptr undef`)
G_BR %bb.2
...
@ -93,7 +93,7 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[PHI:%[0-9]+]]:fpr16 = PHI %7, %bb.2, [[COPY2]], %bb.1
; CHECK-NEXT: [[FCVTHSr:%[0-9]+]]:fpr16 = nofpexcept FCVTHSr [[COPY]]
; CHECK-NEXT: STRHui [[PHI]], [[DEF1]], 0 :: (store (s16) into `half* undef`)
; CHECK-NEXT: STRHui [[PHI]], [[DEF1]], 0 :: (store (s16) into `ptr undef`)
; CHECK-NEXT: B %bb.2
bb.0:
successors: %bb.1(0x80000000)
@ -114,7 +114,7 @@ body: |
%3:fpr(s16) = G_PHI %5(s16), %bb.2, %1(s16), %bb.1
%5:fpr(s16) = G_FPTRUNC %8(s32)
G_STORE %3(s16), %4(p0) :: (store (s16) into `half* undef`)
G_STORE %3(s16), %4(p0) :: (store (s16) into `ptr undef`)
G_BR %bb.2
...


@ -15,8 +15,8 @@
# CHECK-LABEL: test
# CHECK: bb.0:
# CHECK-NEXT: liveins: $x0, $x17, $x18
# CHECK: renamable $q13_q14_q15 = LD3Threev16b undef renamable $x17 :: (load (s384) from `<16 x i8>* undef`, align 64)
# CHECK-NEXT: renamable $q23_q24_q25 = LD3Threev16b undef renamable $x18 :: (load (s384) from `<16 x i8>* undef`, align 64)
# CHECK: renamable $q13_q14_q15 = LD3Threev16b undef renamable $x17 :: (load (s384) from `ptr undef`, align 64)
# CHECK-NEXT: renamable $q23_q24_q25 = LD3Threev16b undef renamable $x18 :: (load (s384) from `ptr undef`, align 64)
# CHECK-NEXT: $q16 = EXTv16i8 renamable $q23, renamable $q23, 8
# CHECK-NEXT: renamable $q20 = EXTv16i8 renamable $q14, renamable $q14, 8
# CHECK-NEXT: STRQui killed renamable $q20, $sp, 4 :: (store (s128))


@ -20,7 +20,7 @@ body: |
; CHECK: B %bb.2
; CHECK: bb.1:
; CHECK: successors: %bb.9(0x80000000)
; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[DEF3]], 0 :: (load (s64) from `i64* undef`)
; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[DEF3]], 0 :: (load (s64) from `ptr undef`)
; CHECK: B %bb.9
; CHECK: bb.2:
; CHECK: successors: %bb.3(0x40000000), %bb.4(0x40000000)
@ -71,7 +71,7 @@ body: |
bb.2:
successors: %bb.8(0x80000000)
%8:gpr64 = LDRXui %9, 0 :: (load (s64) from `i64* undef`)
%8:gpr64 = LDRXui %9, 0 :: (load (s64) from `ptr undef`)
B %bb.8
bb.3:


@ -303,8 +303,8 @@ body: |
; CHECK: %ptr2:_(p1) = G_IMPLICIT_DEF
; CHECK: %ptr3:_(p1) = COPY $vgpr2_vgpr3
; CHECK: %ptr4:_(p1) = COPY $vgpr4_vgpr5
; CHECK: G_STORE %src1(s32), %ptr1(p1) :: (volatile store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
; CHECK: G_STORE %src2(s32), %ptr2(p1) :: (volatile store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
; CHECK: G_STORE %src1(s32), %ptr1(p1) :: (volatile store (s32) into `ptr addrspace(1) undef`, addrspace 1)
; CHECK: G_STORE %src2(s32), %ptr2(p1) :: (volatile store (s32) into `ptr addrspace(1) undef`, addrspace 1)
; CHECK: %div:_(s32), %rem:_ = G_SDIVREM %src1, %src2
; CHECK: G_STORE %div(s32), %ptr3(p1) :: (store (s32), addrspace 1)
; CHECK: G_STORE %rem(s32), %ptr4(p1) :: (store (s32), addrspace 1)


@ -8,7 +8,7 @@ body: |
; GCN-LABEL: name: merge_flat_load_dword_2
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `i32* undef`, align 4)
; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `ptr undef`, align 4)
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[FLAT_LOAD_DWORDX2_]].sub0
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX2_]].sub1
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
@ -25,7 +25,7 @@ body: |
; GCN-LABEL: name: merge_flat_load_dword_3
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[FLAT_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = FLAT_LOAD_DWORDX3 [[DEF]], 0, 1, implicit $exec, implicit $flat_scr :: (load (s96) from `i32* undef`, align 4)
; GCN-NEXT: [[FLAT_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = FLAT_LOAD_DWORDX3 [[DEF]], 0, 1, implicit $exec, implicit $flat_scr :: (load (s96) from `ptr undef`, align 4)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[FLAT_LOAD_DWORDX3_]].sub0_sub1
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX3_]].sub2
; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0
@ -45,7 +45,7 @@ body: |
; GCN-LABEL: name: merge_flat_load_dword_4
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 2, implicit $exec, implicit $flat_scr :: (load (s128) from `i32* undef`, align 4)
; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 2, implicit $exec, implicit $flat_scr :: (load (s128) from `ptr undef`, align 4)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[FLAT_LOAD_DWORDX4_]].sub0_sub1_sub2
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX4_]].sub3
; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64_align2 = COPY [[COPY]].sub0_sub1
@ -68,14 +68,14 @@ body: |
; GCN-LABEL: name: merge_flat_load_dword_5
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 3, implicit $exec, implicit $flat_scr :: (load (s128) from `i32* undef`, align 4)
; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 3, implicit $exec, implicit $flat_scr :: (load (s128) from `ptr undef`, align 4)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[FLAT_LOAD_DWORDX4_]].sub0_sub1_sub2
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX4_]].sub3
; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64_align2 = COPY [[COPY]].sub0_sub1
; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY killed [[COPY]].sub2
; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[COPY2]].sub0
; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[COPY2]].sub1
; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 16, 3, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`)
; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 16, 3, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
; GCN-NEXT: S_NOP 0, implicit [[COPY4]], implicit [[COPY5]], implicit [[COPY3]], implicit [[COPY1]], implicit [[FLAT_LOAD_DWORD]]
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = FLAT_LOAD_DWORD %0, 0, 3, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`, align 4)
@ -93,14 +93,14 @@ body: |
; GCN-LABEL: name: merge_flat_load_dword_6
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `i32* undef`, align 4)
; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `ptr undef`, align 4)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[FLAT_LOAD_DWORDX4_]].sub0_sub1_sub2
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX4_]].sub3
; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64_align2 = COPY [[COPY]].sub0_sub1
; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY killed [[COPY]].sub2
; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[COPY2]].sub0
; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[COPY2]].sub1
; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 16, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `i32* undef`, align 4)
; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 16, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `ptr undef`, align 4)
; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[FLAT_LOAD_DWORDX2_]].sub0
; GCN-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX2_]].sub1
; GCN-NEXT: S_NOP 0, implicit [[COPY4]], implicit [[COPY5]], implicit [[COPY3]], implicit [[COPY1]], implicit [[COPY6]], implicit [[COPY7]]
@ -121,7 +121,7 @@ body: |
; GCN-LABEL: name: merge_flat_load_dwordx2
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `i64* undef`, align 4)
; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `ptr undef`, align 4)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[FLAT_LOAD_DWORDX4_]].sub0_sub1
; GCN-NEXT: [[COPY1:%[0-9]+]]:vreg_64_align2 = COPY killed [[FLAT_LOAD_DWORDX4_]].sub2_sub3
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
@ -138,7 +138,7 @@ body: |
; GCN-LABEL: name: merge_flat_load_dwordx3_with_dwordx1
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 12, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `i128* undef`, align 8)
; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 12, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `ptr undef`, align 8)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[FLAT_LOAD_DWORDX4_]].sub0_sub1_sub2
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX4_]].sub3
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
@ -155,7 +155,7 @@ body: |
; GCN-LABEL: name: merge_flat_load_dwordx1_with_dwordx2
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[FLAT_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = FLAT_LOAD_DWORDX3 [[DEF]], 12, 0, implicit $exec, implicit $flat_scr :: (load (s96) from `i32* undef`, align 4)
; GCN-NEXT: [[FLAT_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = FLAT_LOAD_DWORDX3 [[DEF]], 12, 0, implicit $exec, implicit $flat_scr :: (load (s96) from `ptr undef`, align 4)
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[FLAT_LOAD_DWORDX3_]].sub0
; GCN-NEXT: [[COPY1:%[0-9]+]]:vreg_64_align2 = COPY killed [[FLAT_LOAD_DWORDX3_]].sub1_sub2
; GCN-NEXT: S_NOP 0, implicit [[COPY1]], implicit [[COPY]]
@ -172,8 +172,8 @@ body: |
; GCN-LABEL: name: no_merge_flat_load_dword_agpr_with_vgpr
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`)
; GCN-NEXT: [[FLAT_LOAD_DWORD1:%[0-9]+]]:agpr_32 = FLAT_LOAD_DWORD [[DEF]], 4, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`)
; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
; GCN-NEXT: [[FLAT_LOAD_DWORD1:%[0-9]+]]:agpr_32 = FLAT_LOAD_DWORD [[DEF]], 4, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
; GCN-NEXT: S_NOP 0, implicit [[FLAT_LOAD_DWORD]], implicit [[FLAT_LOAD_DWORD1]]
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = FLAT_LOAD_DWORD %0, 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`, align 4)
@ -188,8 +188,8 @@ body: |
; GCN-LABEL: name: no_merge_flat_load_dword_disjoint
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`)
; GCN-NEXT: [[FLAT_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 8, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`)
; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
; GCN-NEXT: [[FLAT_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 8, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
; GCN-NEXT: S_NOP 0, implicit [[FLAT_LOAD_DWORD]], implicit [[FLAT_LOAD_DWORD1]]
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = FLAT_LOAD_DWORD %0, 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`, align 4)
@ -204,8 +204,8 @@ body: |
; GCN-LABEL: name: no_merge_flat_load_dword_overlap
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`)
; GCN-NEXT: [[FLAT_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 3, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`)
; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
; GCN-NEXT: [[FLAT_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 3, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
; GCN-NEXT: S_NOP 0, implicit [[FLAT_LOAD_DWORD]], implicit [[FLAT_LOAD_DWORD1]]
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = FLAT_LOAD_DWORD %0, 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`, align 4)
@ -220,8 +220,8 @@ body: |
; GCN-LABEL: name: no_merge_flat_load_dword_different_cpol
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 1, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`)
; GCN-NEXT: [[FLAT_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 4, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`)
; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 1, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
; GCN-NEXT: [[FLAT_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 4, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
; GCN-NEXT: S_NOP 0, implicit [[FLAT_LOAD_DWORD]], implicit [[FLAT_LOAD_DWORD1]]
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = FLAT_LOAD_DWORD %0, 0, 1, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`, align 4)
@ -239,7 +239,7 @@ body: |
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0, killed [[DEF2]], %subreg.sub1
; GCN-NEXT: FLAT_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s64) into `i32* undef`, align 4)
; GCN-NEXT: FLAT_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s64) into `ptr undef`, align 4)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
@ -259,7 +259,7 @@ body: |
; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0, killed [[DEF2]], %subreg.sub1
; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE killed [[REG_SEQUENCE]], %subreg.sub0_sub1, killed [[DEF3]], %subreg.sub2
; GCN-NEXT: FLAT_STORE_DWORDX3 [[DEF]], killed [[REG_SEQUENCE1]], 4, 1, implicit $exec, implicit $flat_scr :: (store (s96) into `i32* undef`, align 4)
; GCN-NEXT: FLAT_STORE_DWORDX3 [[DEF]], killed [[REG_SEQUENCE1]], 4, 1, implicit $exec, implicit $flat_scr :: (store (s96) into `ptr undef`, align 4)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
@ -280,7 +280,7 @@ body: |
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF1]].sub1, %subreg.sub1, [[DEF1]].sub0, %subreg.sub0
; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[DEF1]].sub2, %subreg.sub2, killed [[REG_SEQUENCE]], %subreg.sub0_sub1
; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[DEF1]].sub3, %subreg.sub3, killed [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2
; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 2, implicit $exec, implicit $flat_scr :: (store (s128) into `i32* undef`, align 4)
; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 2, implicit $exec, implicit $flat_scr :: (store (s128) into `ptr undef`, align 4)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vreg_128 = IMPLICIT_DEF
FLAT_STORE_DWORD %0, %1.sub1, 8, 2, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`, align 4)
@ -304,8 +304,8 @@ body: |
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:areg_64_align2 = REG_SEQUENCE [[DEF1]], %subreg.sub0, [[DEF2]], %subreg.sub1
; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:areg_96_align2 = REG_SEQUENCE killed [[REG_SEQUENCE]], %subreg.sub0_sub1, [[DEF3]], %subreg.sub2
; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:areg_128_align2 = REG_SEQUENCE killed [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2, [[DEF4]], %subreg.sub3
; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 3, implicit $exec, implicit $flat_scr :: (store (s128) into `i32* undef`, align 4)
; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], [[DEF5]], 20, 3, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 3, implicit $exec, implicit $flat_scr :: (store (s128) into `ptr undef`, align 4)
; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], [[DEF5]], 20, 3, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:agpr_32 = IMPLICIT_DEF
%2:agpr_32 = IMPLICIT_DEF
@ -335,9 +335,9 @@ body: |
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF1]], %subreg.sub0, [[DEF2]], %subreg.sub1
; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE killed [[REG_SEQUENCE]], %subreg.sub0_sub1, [[DEF3]], %subreg.sub2
; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE killed [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2, [[DEF4]], %subreg.sub3
; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `i32* undef`, align 8)
; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `ptr undef`, align 8)
; GCN-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF5]], %subreg.sub0, [[DEF6]], %subreg.sub1
; GCN-NEXT: FLAT_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE3]], 20, 0, implicit $exec, implicit $flat_scr :: (store (s64) into `i32* undef`, align 4)
; GCN-NEXT: FLAT_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE3]], 20, 0, implicit $exec, implicit $flat_scr :: (store (s64) into `ptr undef`, align 4)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
@ -363,7 +363,7 @@ body: |
; GCN-NEXT: [[DEF1:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0_sub1, killed [[DEF2]], %subreg.sub2_sub3
; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `i64* undef`, align 4)
; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `ptr undef`, align 4)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vreg_64_align2 = IMPLICIT_DEF
%2:vreg_64_align2 = IMPLICIT_DEF
@ -381,7 +381,7 @@ body: |
; GCN-NEXT: [[DEF1:%[0-9]+]]:vreg_96_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0_sub1_sub2, killed [[DEF2]], %subreg.sub3
; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `i64* undef`)
; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `ptr undef`)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vreg_96_align2 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
@ -398,8 +398,8 @@ body: |
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:agpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
; GCN-NEXT: FLAT_STORE_DWORD killed [[DEF]], killed [[DEF2]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
; GCN-NEXT: FLAT_STORE_DWORD killed [[DEF]], killed [[DEF2]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:agpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
@ -416,8 +416,8 @@ body: |
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
; GCN-NEXT: FLAT_STORE_DWORD killed [[DEF]], killed [[DEF2]], 6, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
; GCN-NEXT: FLAT_STORE_DWORD killed [[DEF]], killed [[DEF2]], 6, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
@ -434,8 +434,8 @@ body: |
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
; GCN-NEXT: FLAT_STORE_DWORD killed [[DEF]], killed [[DEF2]], 2, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`, align 2)
; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
; GCN-NEXT: FLAT_STORE_DWORD killed [[DEF]], killed [[DEF2]], 2, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`, align 2)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
@ -452,8 +452,8 @@ body: |
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 1, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
; GCN-NEXT: FLAT_STORE_DWORD killed [[DEF]], killed [[DEF2]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 1, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
; GCN-NEXT: FLAT_STORE_DWORD killed [[DEF]], killed [[DEF2]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
@ -470,8 +470,8 @@ body: |
; GCN: [[DEF:%[0-9]+]]:vreg_128_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: FLAT_STORE_DWORD [[DEF]].sub0_sub1, killed [[DEF1]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
; GCN-NEXT: FLAT_STORE_DWORD [[DEF]].sub2_sub3, killed [[DEF2]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
; GCN-NEXT: FLAT_STORE_DWORD [[DEF]].sub0_sub1, killed [[DEF1]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
; GCN-NEXT: FLAT_STORE_DWORD [[DEF]].sub2_sub3, killed [[DEF2]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
%0:vreg_128_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF


@ -8,7 +8,7 @@ body: |
; GCN-LABEL: name: merge_flat_global_load_dword_2
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `float* undef` + 4, align 4)
; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `ptr undef` + 4, align 4)
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[FLAT_LOAD_DWORDX2_]].sub0
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX2_]].sub1
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
@ -25,7 +25,7 @@ body: |
; GCN-LABEL: name: merge_global_flat_load_dword_2
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `float addrspace(1)* undef`)
; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `ptr addrspace(1) undef`)
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[FLAT_LOAD_DWORDX2_]].sub0
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX2_]].sub1
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
@ -42,7 +42,7 @@ body: |
; GCN-LABEL: name: merge_global_flat_load_dword_3
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[FLAT_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = FLAT_LOAD_DWORDX3 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s96) from `float* undef`, align 16)
; GCN-NEXT: [[FLAT_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = FLAT_LOAD_DWORDX3 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s96) from `ptr undef`, align 16)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[FLAT_LOAD_DWORDX3_]].sub0_sub1
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX3_]].sub2
; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub1
@ -62,7 +62,7 @@ body: |
; GCN-LABEL: name: merge_global_flat_load_dword_4
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 4, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `i32 addrspace(1)* undef` + 4, align 4, basealign 8)
; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 4, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `ptr addrspace(1) undef` + 4, align 4, basealign 8)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[FLAT_LOAD_DWORDX4_]].sub0_sub1_sub2
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX4_]].sub3
; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64_align2 = COPY [[COPY]].sub0_sub1
@ -85,7 +85,7 @@ body: |
; GCN-LABEL: name: merge_flat_global_load_dwordx2
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `double* undef`, align 8)
; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `ptr undef`, align 8)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[FLAT_LOAD_DWORDX4_]].sub0_sub1
; GCN-NEXT: [[COPY1:%[0-9]+]]:vreg_64_align2 = COPY killed [[FLAT_LOAD_DWORDX4_]].sub2_sub3
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
@ -102,7 +102,7 @@ body: |
; GCN-LABEL: name: merge_flat_global_load_dwordx3
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `float* undef`, align 4)
; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `ptr undef`, align 4)
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[FLAT_LOAD_DWORDX4_]].sub0
; GCN-NEXT: [[COPY1:%[0-9]+]]:vreg_96_align2 = COPY killed [[FLAT_LOAD_DWORDX4_]].sub1_sub2_sub3
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
@ -119,7 +119,7 @@ body: |
; GCN-LABEL: name: merge_global_flat_load_dwordx3
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `i32 addrspace(1)* undef`, align 4)
; GCN-NEXT: [[FLAT_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = FLAT_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s128) from `ptr addrspace(1) undef`, align 4)
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[FLAT_LOAD_DWORDX4_]].sub0
; GCN-NEXT: [[COPY1:%[0-9]+]]:vreg_96_align2 = COPY killed [[FLAT_LOAD_DWORDX4_]].sub1_sub2_sub3
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
@ -137,8 +137,8 @@ body: |
; GCN-LABEL: name: no_merge_flat_global_load_dword_saddr
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `float* undef`)
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2_SADDR [[DEF1]], [[DEF]].sub0, 4, 0, implicit $exec :: (load (s64) from `i32 addrspace(1)* undef` + 4, align 4, addrspace 1)
; GCN-NEXT: [[FLAT_LOAD_DWORD:%[0-9]+]]:vgpr_32 = FLAT_LOAD_DWORD [[DEF]], 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2_SADDR [[DEF1]], [[DEF]].sub0, 4, 0, implicit $exec :: (load (s64) from `ptr addrspace(1) undef` + 4, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_SADDR]].sub0
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX2_SADDR]].sub1
; GCN-NEXT: S_NOP 0, implicit [[FLAT_LOAD_DWORD]], implicit [[COPY]], implicit [[COPY1]]
@ -158,8 +158,8 @@ body: |
; GCN-LABEL: name: no_merge_global_saddr_flat_load_dword
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF1]], [[DEF]].sub0, 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 4, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `i32* undef` + 4, align 4)
; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF1]], [[DEF]].sub0, 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: [[FLAT_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = FLAT_LOAD_DWORDX2 [[DEF]], 4, 0, implicit $exec, implicit $flat_scr :: (load (s64) from `ptr undef` + 4, align 4)
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[FLAT_LOAD_DWORDX2_]].sub0
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[FLAT_LOAD_DWORDX2_]].sub1
; GCN-NEXT: S_NOP 0, implicit [[GLOBAL_LOAD_DWORD_SADDR]], implicit [[COPY]], implicit [[COPY1]]
@ -180,7 +180,7 @@ body: |
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0, killed [[DEF2]], %subreg.sub1
; GCN-NEXT: FLAT_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s64) into `i32* undef`, align 4)
; GCN-NEXT: FLAT_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s64) into `ptr undef`, align 4)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
@ -197,7 +197,7 @@ body: |
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0, killed [[DEF2]], %subreg.sub1
; GCN-NEXT: FLAT_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s64) into `i32 addrspace(1)* undef`, align 4)
; GCN-NEXT: FLAT_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s64) into `ptr addrspace(1) undef`, align 4)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
@ -214,7 +214,7 @@ body: |
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[DEF1]], %subreg.sub0, [[DEF2]], %subreg.sub1_sub2
; GCN-NEXT: FLAT_STORE_DWORDX3 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s96) into `i32* undef`, align 4)
; GCN-NEXT: FLAT_STORE_DWORDX3 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s96) into `ptr undef`, align 4)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vreg_64_align2 = IMPLICIT_DEF
@ -231,7 +231,7 @@ body: |
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vreg_96_align2 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[DEF1]], %subreg.sub0, [[DEF2]], %subreg.sub1_sub2_sub3
; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `i32* undef`, align 4)
; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `ptr undef`, align 4)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vreg_96_align2 = IMPLICIT_DEF
@ -248,7 +248,7 @@ body: |
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[DEF1]], %subreg.sub2, [[DEF2]], %subreg.sub0_sub1
; GCN-NEXT: FLAT_STORE_DWORDX3 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s96) into `i64* undef`, align 8)
; GCN-NEXT: FLAT_STORE_DWORDX3 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s96) into `ptr undef`, align 8)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vreg_64_align2 = IMPLICIT_DEF
@ -265,7 +265,7 @@ body: |
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vreg_96_align2 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[DEF1]], %subreg.sub3, [[DEF2]], %subreg.sub0_sub1_sub2
; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `<3 x i32>* undef`)
; GCN-NEXT: FLAT_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s128) into `ptr undef`)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vreg_96_align2 = IMPLICIT_DEF
@ -282,8 +282,8 @@ body: |
; GCN-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], [[DEF2]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF]].sub0, [[DEF3]], [[DEF1]], 4, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], [[DEF2]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF]].sub0, [[DEF3]], [[DEF1]], 4, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:sreg_64_xexec = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
@ -301,8 +301,8 @@ body: |
; GCN-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF]].sub0, [[DEF2]], [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], [[DEF3]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF]].sub0, [[DEF2]], [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: FLAT_STORE_DWORD [[DEF]], [[DEF3]], 4, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:sreg_64_xexec = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF


@ -8,7 +8,7 @@ body: |
; GCN-LABEL: name: merge_global_load_dword_2
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec :: (load (s64) from `float addrspace(1)* undef` + 4, align 4, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec :: (load (s64) from `ptr addrspace(1) undef` + 4, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_]].sub0
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX2_]].sub1
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
@ -25,7 +25,7 @@ body: |
; GCN-LABEL: name: merge_global_load_dword_3
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = GLOBAL_LOAD_DWORDX3 [[DEF]], 0, 1, implicit $exec :: (load (s96) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = GLOBAL_LOAD_DWORDX3 [[DEF]], 0, 1, implicit $exec :: (load (s96) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[GLOBAL_LOAD_DWORDX3_]].sub0_sub1
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX3_]].sub2
; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0
@ -45,7 +45,7 @@ body: |
; GCN-LABEL: name: merge_global_load_dword_4
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 0, 2, implicit $exec :: (load (s128) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 0, 2, implicit $exec :: (load (s128) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[GLOBAL_LOAD_DWORDX4_]].sub0_sub1_sub2
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX4_]].sub3
; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64_align2 = COPY [[COPY]].sub0_sub1
@ -68,14 +68,14 @@ body: |
; GCN-LABEL: name: merge_global_load_dword_5
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 0, 3, implicit $exec :: (load (s128) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 0, 3, implicit $exec :: (load (s128) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[GLOBAL_LOAD_DWORDX4_]].sub0_sub1_sub2
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX4_]].sub3
; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64_align2 = COPY [[COPY]].sub0_sub1
; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY killed [[COPY]].sub2
; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[COPY2]].sub0
; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[COPY2]].sub1
; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 16, 3, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 16, 3, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: S_NOP 0, implicit [[COPY4]], implicit [[COPY5]], implicit [[COPY3]], implicit [[COPY1]], implicit [[GLOBAL_LOAD_DWORD]]
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = GLOBAL_LOAD_DWORD %0, 0, 3, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
@ -93,14 +93,14 @@ body: |
; GCN-LABEL: name: merge_global_load_dword_6
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec :: (load (s128) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec :: (load (s128) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[GLOBAL_LOAD_DWORDX4_]].sub0_sub1_sub2
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX4_]].sub3
; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64_align2 = COPY [[COPY]].sub0_sub1
; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY killed [[COPY]].sub2
; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[COPY2]].sub0
; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[COPY2]].sub1
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2 [[DEF]], 16, 0, implicit $exec :: (load (s64) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2 [[DEF]], 16, 0, implicit $exec :: (load (s64) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_]].sub0
; GCN-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX2_]].sub1
; GCN-NEXT: S_NOP 0, implicit [[COPY4]], implicit [[COPY5]], implicit [[COPY3]], implicit [[COPY1]], implicit [[COPY6]], implicit [[COPY7]]
@ -121,7 +121,7 @@ body: |
; GCN-LABEL: name: merge_global_load_dwordx2
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec :: (load (s128) from `i64 addrspace(1)* undef`, align 4, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 0, 0, implicit $exec :: (load (s128) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[GLOBAL_LOAD_DWORDX4_]].sub0_sub1
; GCN-NEXT: [[COPY1:%[0-9]+]]:vreg_64_align2 = COPY killed [[GLOBAL_LOAD_DWORDX4_]].sub2_sub3
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
@ -138,7 +138,7 @@ body: |
; GCN-LABEL: name: merge_global_load_dwordx3_with_dwordx1
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 12, 0, implicit $exec :: (load (s128) from `i128 addrspace(1)* undef`, align 8, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4 [[DEF]], 12, 0, implicit $exec :: (load (s128) from `ptr addrspace(1) undef`, align 8, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[GLOBAL_LOAD_DWORDX4_]].sub0_sub1_sub2
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX4_]].sub3
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
@ -155,7 +155,7 @@ body: |
; GCN-LABEL: name: merge_global_load_dwordx1_with_dwordx2
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = GLOBAL_LOAD_DWORDX3 [[DEF]], 12, 0, implicit $exec :: (load (s96) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = GLOBAL_LOAD_DWORDX3 [[DEF]], 12, 0, implicit $exec :: (load (s96) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX3_]].sub0
; GCN-NEXT: [[COPY1:%[0-9]+]]:vreg_64_align2 = COPY killed [[GLOBAL_LOAD_DWORDX3_]].sub1_sub2
; GCN-NEXT: S_NOP 0, implicit [[COPY1]], implicit [[COPY]]
@ -172,8 +172,8 @@ body: |
; GCN-LABEL: name: no_merge_global_load_dword_agpr_with_vgpr
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORD1:%[0-9]+]]:agpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 4, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORD1:%[0-9]+]]:agpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 4, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: S_NOP 0, implicit [[GLOBAL_LOAD_DWORD]], implicit [[GLOBAL_LOAD_DWORD1]]
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = GLOBAL_LOAD_DWORD %0, 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
@ -188,8 +188,8 @@ body: |
; GCN-LABEL: name: no_merge_global_load_dword_disjoint
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 8, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 8, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: S_NOP 0, implicit [[GLOBAL_LOAD_DWORD]], implicit [[GLOBAL_LOAD_DWORD1]]
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = GLOBAL_LOAD_DWORD %0, 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
@ -204,8 +204,8 @@ body: |
; GCN-LABEL: name: no_merge_global_load_dword_overlap
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 3, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 3, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: S_NOP 0, implicit [[GLOBAL_LOAD_DWORD]], implicit [[GLOBAL_LOAD_DWORD1]]
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = GLOBAL_LOAD_DWORD %0, 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
@ -220,8 +220,8 @@ body: |
; GCN-LABEL: name: no_merge_global_load_dword_different_cpol
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 0, 1, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 4, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 0, 1, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF]], 4, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: S_NOP 0, implicit [[GLOBAL_LOAD_DWORD]], implicit [[GLOBAL_LOAD_DWORD1]]
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = GLOBAL_LOAD_DWORD %0, 0, 1, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
@ -237,7 +237,7 @@ body: |
; GCN-LABEL: name: merge_global_load_dword_saddr_2
; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s64) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s64) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_SADDR]].sub0
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX2_SADDR]].sub1
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
@ -256,7 +256,7 @@ body: |
; GCN-LABEL: name: merge_global_load_dword_saddr_3
; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX3_SADDR:%[0-9]+]]:vreg_96_align2 = GLOBAL_LOAD_DWORDX3_SADDR [[DEF]], [[DEF1]], 0, 1, implicit $exec :: (load (s96) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX3_SADDR:%[0-9]+]]:vreg_96_align2 = GLOBAL_LOAD_DWORDX3_SADDR [[DEF]], [[DEF1]], 0, 1, implicit $exec :: (load (s96) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[GLOBAL_LOAD_DWORDX3_SADDR]].sub0_sub1
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX3_SADDR]].sub2
; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub0
@ -278,7 +278,7 @@ body: |
; GCN-LABEL: name: merge_global_load_dword_saddr_4
; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_SADDR:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4_SADDR [[DEF]], [[DEF1]], 0, 2, implicit $exec :: (load (s128) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_SADDR:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4_SADDR [[DEF]], [[DEF1]], 0, 2, implicit $exec :: (load (s128) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[GLOBAL_LOAD_DWORDX4_SADDR]].sub0_sub1_sub2
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX4_SADDR]].sub3
; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64_align2 = COPY [[COPY]].sub0_sub1
@ -303,14 +303,14 @@ body: |
; GCN-LABEL: name: merge_global_load_dword_saddr_6
; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_SADDR:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4_SADDR [[DEF]], [[DEF1]], 4, 3, implicit $exec :: (load (s128) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_SADDR:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4_SADDR [[DEF]], [[DEF1]], 4, 3, implicit $exec :: (load (s128) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_96_align2 = COPY [[GLOBAL_LOAD_DWORDX4_SADDR]].sub0_sub1_sub2
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX4_SADDR]].sub3
; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64_align2 = COPY [[COPY]].sub0_sub1
; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY killed [[COPY]].sub2
; GCN-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[COPY2]].sub0
; GCN-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY killed [[COPY2]].sub1
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2_SADDR [[DEF]], [[DEF1]], 20, 3, implicit $exec :: (load (s64) from `i32 addrspace(1)* undef`, align 4, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_SADDR:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2_SADDR [[DEF]], [[DEF1]], 20, 3, implicit $exec :: (load (s64) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_SADDR]].sub0
; GCN-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX2_SADDR]].sub1
; GCN-NEXT: S_NOP 0, implicit [[COPY4]], implicit [[COPY5]], implicit [[COPY3]], implicit [[COPY1]], implicit [[COPY6]], implicit [[COPY7]]
@ -333,7 +333,7 @@ body: |
; GCN-LABEL: name: merge_global_load_dwordx2_saddr
; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_SADDR:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s128) from `i64 addrspace(1)* undef`, align 4, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX4_SADDR:%[0-9]+]]:vreg_128_align2 = GLOBAL_LOAD_DWORDX4_SADDR [[DEF]], [[DEF1]], 0, 0, implicit $exec :: (load (s128) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[GLOBAL_LOAD_DWORDX4_SADDR]].sub0_sub1
; GCN-NEXT: [[COPY1:%[0-9]+]]:vreg_64_align2 = COPY killed [[GLOBAL_LOAD_DWORDX4_SADDR]].sub2_sub3
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
@ -352,8 +352,8 @@ body: |
; GCN-LABEL: name: no_merge_global_load_dword_and_global_load_dword_saddr
; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF1]], 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]].sub0, 4, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[DEF1]], 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]].sub0, 4, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: S_NOP 0, implicit [[GLOBAL_LOAD_DWORD]], implicit [[GLOBAL_LOAD_DWORD_SADDR]]
%0:sreg_64_xexec = IMPLICIT_DEF
%1:vreg_64_align2 = IMPLICIT_DEF
@ -370,8 +370,8 @@ body: |
; GCN-LABEL: name: no_merge_global_load_dword_saddr_different_saddr
; GCN: [[DEF:%[0-9]+]]:sgpr_128 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]].sub0_sub1, [[DEF1]], 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]].sub2_sub3, [[DEF1]], 4, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]].sub0_sub1, [[DEF1]], 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]].sub2_sub3, [[DEF1]], 4, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: S_NOP 0, implicit [[GLOBAL_LOAD_DWORD_SADDR]], implicit [[GLOBAL_LOAD_DWORD_SADDR1]]
%0:sgpr_128 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
@ -388,8 +388,8 @@ body: |
; GCN-LABEL: name: no_merge_global_load_dword_saddr_different_vaddr
; GCN: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]].sub0, 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]].sub1, 4, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]].sub0, 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR [[DEF]], [[DEF1]].sub1, 4, 0, implicit $exec :: (load (s32) from `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: S_NOP 0, implicit [[GLOBAL_LOAD_DWORD_SADDR]], implicit [[GLOBAL_LOAD_DWORD_SADDR1]]
%0:sreg_64_xexec = IMPLICIT_DEF
%1:vreg_64_align2 = IMPLICIT_DEF
@ -404,7 +404,7 @@ body: |
; GCN-LABEL: name: merge_global_load_dword_2_out_of_order
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec :: (load (s64) from `float addrspace(1)* undef`, align 4, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX2_:%[0-9]+]]:vreg_64_align2 = GLOBAL_LOAD_DWORDX2 [[DEF]], 0, 0, implicit $exec :: (load (s64) from `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_]].sub1
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX2_]].sub0
; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
@ -421,7 +421,7 @@ body: |
; GCN-LABEL: name: merge_global_load_dword_3_out_of_order
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = GLOBAL_LOAD_DWORDX3 [[DEF]], 0, 0, implicit $exec :: (load (s96) from `float addrspace(1)* undef`, align 16, addrspace 1)
; GCN-NEXT: [[GLOBAL_LOAD_DWORDX3_:%[0-9]+]]:vreg_96_align2 = GLOBAL_LOAD_DWORDX3 [[DEF]], 0, 0, implicit $exec :: (load (s96) from `ptr addrspace(1) undef`, align 16, addrspace 1)
; GCN-NEXT: [[COPY:%[0-9]+]]:vreg_64_align2 = COPY [[GLOBAL_LOAD_DWORDX3_]].sub0_sub1
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX3_]].sub2
; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]].sub1
@ -445,7 +445,7 @@ body: |
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0, killed [[DEF2]], %subreg.sub1
; GCN-NEXT: GLOBAL_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec :: (store (s64) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE]], 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(1) undef`, align 4, addrspace 1)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
@ -465,7 +465,7 @@ body: |
; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0, killed [[DEF2]], %subreg.sub1
; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE killed [[REG_SEQUENCE]], %subreg.sub0_sub1, killed [[DEF3]], %subreg.sub2
; GCN-NEXT: GLOBAL_STORE_DWORDX3 [[DEF]], killed [[REG_SEQUENCE1]], 4, 1, implicit $exec :: (store (s96) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORDX3 [[DEF]], killed [[REG_SEQUENCE1]], 4, 1, implicit $exec :: (store (s96) into `ptr addrspace(1) undef`, align 4, addrspace 1)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
@ -486,7 +486,7 @@ body: |
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF1]].sub1, %subreg.sub1, [[DEF1]].sub0, %subreg.sub0
; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[DEF1]].sub2, %subreg.sub2, killed [[REG_SEQUENCE]], %subreg.sub0_sub1
; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[DEF1]].sub3, %subreg.sub3, killed [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2
; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 2, implicit $exec :: (store (s128) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 2, implicit $exec :: (store (s128) into `ptr addrspace(1) undef`, align 4, addrspace 1)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vreg_128 = IMPLICIT_DEF
GLOBAL_STORE_DWORD %0, %1.sub1, 8, 2, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
@ -510,8 +510,8 @@ body: |
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:areg_64_align2 = REG_SEQUENCE [[DEF1]], %subreg.sub0, [[DEF2]], %subreg.sub1
; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:areg_96_align2 = REG_SEQUENCE killed [[REG_SEQUENCE]], %subreg.sub0_sub1, [[DEF3]], %subreg.sub2
; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:areg_128_align2 = REG_SEQUENCE killed [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2, [[DEF4]], %subreg.sub3
; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 3, implicit $exec :: (store (s128) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], [[DEF5]], 20, 3, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 3, implicit $exec :: (store (s128) into `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], [[DEF5]], 20, 3, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:agpr_32 = IMPLICIT_DEF
%2:agpr_32 = IMPLICIT_DEF
@ -541,9 +541,9 @@ body: |
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF1]], %subreg.sub0, [[DEF2]], %subreg.sub1
; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE killed [[REG_SEQUENCE]], %subreg.sub0_sub1, [[DEF3]], %subreg.sub2
; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE killed [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2, [[DEF4]], %subreg.sub3
; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 0, implicit $exec :: (store (s128) into `i32 addrspace(1)* undef`, align 8, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE2]], 4, 0, implicit $exec :: (store (s128) into `ptr addrspace(1) undef`, align 8, addrspace 1)
; GCN-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF5]], %subreg.sub0, [[DEF6]], %subreg.sub1
; GCN-NEXT: GLOBAL_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE3]], 20, 0, implicit $exec :: (store (s64) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORDX2 [[DEF]], killed [[REG_SEQUENCE3]], 20, 0, implicit $exec :: (store (s64) into `ptr addrspace(1) undef`, align 4, addrspace 1)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
@ -569,7 +569,7 @@ body: |
; GCN-NEXT: [[DEF1:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0_sub1, killed [[DEF2]], %subreg.sub2_sub3
; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 4, 0, implicit $exec :: (store (s128) into `i64 addrspace(1)* undef`, align 4, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 4, 0, implicit $exec :: (store (s128) into `ptr addrspace(1) undef`, align 4, addrspace 1)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vreg_64_align2 = IMPLICIT_DEF
%2:vreg_64_align2 = IMPLICIT_DEF
@ -587,7 +587,7 @@ body: |
; GCN-NEXT: [[DEF1:%[0-9]+]]:vreg_96_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE killed [[DEF1]], %subreg.sub0_sub1_sub2, killed [[DEF2]], %subreg.sub3
; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 4, 0, implicit $exec :: (store (s128) into `i64 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORDX4 [[DEF]], killed [[REG_SEQUENCE]], 4, 0, implicit $exec :: (store (s128) into `ptr addrspace(1) undef`, addrspace 1)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vreg_96_align2 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
@ -604,8 +604,8 @@ body: |
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:agpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORD killed [[DEF]], killed [[DEF2]], 4, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORD killed [[DEF]], killed [[DEF2]], 4, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:agpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
@ -622,8 +622,8 @@ body: |
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORD killed [[DEF]], killed [[DEF2]], 6, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORD killed [[DEF]], killed [[DEF2]], 6, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
@ -640,8 +640,8 @@ body: |
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORD killed [[DEF]], killed [[DEF2]], 2, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, align 2, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORD killed [[DEF]], killed [[DEF2]], 2, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, align 2, addrspace 1)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
@ -658,8 +658,8 @@ body: |
; GCN: [[DEF:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 1, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORD killed [[DEF]], killed [[DEF2]], 4, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]], killed [[DEF1]], 0, 1, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORD killed [[DEF]], killed [[DEF2]], 4, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
%0:vreg_64_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
@ -676,8 +676,8 @@ body: |
; GCN: [[DEF:%[0-9]+]]:vreg_128_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]].sub0_sub1, killed [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]].sub2_sub3, killed [[DEF2]], 4, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]].sub0_sub1, killed [[DEF1]], 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF]].sub2_sub3, killed [[DEF2]], 4, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
%0:vreg_128_align2 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
@ -696,7 +696,7 @@ body: |
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF2]], %subreg.sub0, [[DEF3]], %subreg.sub1
; GCN-NEXT: GLOBAL_STORE_DWORDX2_SADDR [[DEF1]], killed [[REG_SEQUENCE]], [[DEF]], 0, 0, implicit $exec :: (store (s64) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORDX2_SADDR [[DEF1]], killed [[REG_SEQUENCE]], [[DEF]], 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(1) undef`, align 4, addrspace 1)
%0:sreg_64_xexec = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
@ -718,7 +718,7 @@ body: |
; GCN-NEXT: [[DEF4:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF2]], %subreg.sub0, [[DEF3]], %subreg.sub1
; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE killed [[REG_SEQUENCE]], %subreg.sub0_sub1, [[DEF4]], %subreg.sub2
; GCN-NEXT: GLOBAL_STORE_DWORDX3_SADDR [[DEF1]], killed [[REG_SEQUENCE1]], [[DEF]], 4, 1, implicit $exec :: (store (s96) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORDX3_SADDR [[DEF1]], killed [[REG_SEQUENCE1]], [[DEF]], 4, 1, implicit $exec :: (store (s96) into `ptr addrspace(1) undef`, align 4, addrspace 1)
%0:sreg_64_xexec = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
@ -744,7 +744,7 @@ body: |
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF2]], %subreg.sub0, [[DEF3]], %subreg.sub1
; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE killed [[REG_SEQUENCE]], %subreg.sub0_sub1, [[DEF4]], %subreg.sub2
; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE killed [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2, [[DEF5]], %subreg.sub3
; GCN-NEXT: GLOBAL_STORE_DWORDX4_SADDR [[DEF1]], killed [[REG_SEQUENCE2]], [[DEF]], 4, 2, implicit $exec :: (store (s128) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORDX4_SADDR [[DEF1]], killed [[REG_SEQUENCE2]], [[DEF]], 4, 2, implicit $exec :: (store (s128) into `ptr addrspace(1) undef`, align 4, addrspace 1)
%0:sreg_64_xexec = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
@ -774,9 +774,9 @@ body: |
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF2]], %subreg.sub0, [[DEF3]], %subreg.sub1
; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE killed [[REG_SEQUENCE]], %subreg.sub0_sub1, [[DEF4]], %subreg.sub2
; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE killed [[REG_SEQUENCE1]], %subreg.sub0_sub1_sub2, [[DEF5]], %subreg.sub3
; GCN-NEXT: GLOBAL_STORE_DWORDX4_SADDR [[DEF1]], killed [[REG_SEQUENCE2]], [[DEF]], 4, 3, implicit $exec :: (store (s128) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORDX4_SADDR [[DEF1]], killed [[REG_SEQUENCE2]], [[DEF]], 4, 3, implicit $exec :: (store (s128) into `ptr addrspace(1) undef`, align 4, addrspace 1)
; GCN-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[DEF6]], %subreg.sub0, [[DEF7]], %subreg.sub1
; GCN-NEXT: GLOBAL_STORE_DWORDX2_SADDR [[DEF1]], killed [[REG_SEQUENCE3]], [[DEF]], 20, 3, implicit $exec :: (store (s64) into `i32 addrspace(1)* undef`, align 4, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORDX2_SADDR [[DEF1]], killed [[REG_SEQUENCE3]], [[DEF]], 20, 3, implicit $exec :: (store (s64) into `ptr addrspace(1) undef`, align 4, addrspace 1)
%0:sreg_64_xexec = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
@ -803,8 +803,8 @@ body: |
; GCN-NEXT: [[DEF1:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]].sub0, [[DEF2]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF1]], [[DEF3]], 4, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]].sub0, [[DEF2]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORD [[DEF1]], [[DEF3]], 4, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
%0:sreg_64_xexec = IMPLICIT_DEF
%1:vreg_64_align2 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
@ -823,8 +823,8 @@ body: |
; GCN-NEXT: [[DEF1:%[0-9]+]]:vreg_64_align2 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]].sub0, [[DEF2]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]].sub1, [[DEF3]], [[DEF]], 4, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]].sub0, [[DEF2]], [[DEF]], 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]].sub1, [[DEF3]], [[DEF]], 4, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
%0:sreg_64_xexec = IMPLICIT_DEF
%1:vreg_64_align2 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
@ -843,8 +843,8 @@ body: |
; GCN-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[DEF2]], [[DEF]].sub0_sub1, 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[DEF3]], [[DEF]].sub2_sub3, 4, 0, implicit $exec :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[DEF2]], [[DEF]].sub0_sub1, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR [[DEF1]], [[DEF3]], [[DEF]].sub2_sub3, 4, 0, implicit $exec :: (store (s32) into `ptr addrspace(1) undef`, addrspace 1)
%0:sgpr_128 = IMPLICIT_DEF
%1:vgpr_32 = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF


@ -29,7 +29,7 @@ registers:
- { id: 1, class: gprb }
# RW-DEFAULT-NOMOVT: constants:
# RW-DEFAULT-NOMOVT: id: 0
# RW-DEFAULT-NOMOVT: value: 'i32* @internal_global'
# RW-DEFAULT-NOMOVT: value: 'ptr @internal_global'
# RWPI-NOMOVT: constants:
# RWPI-NOMOVT: id: 0
# RWPI-NOMOVT: value: 'internal_global(SBREL)'
@ -63,7 +63,7 @@ registers:
- { id: 1, class: gprb }
# RW-DEFAULT-NOMOVT: constants:
# RW-DEFAULT-NOMOVT: id: 0
# RW-DEFAULT-NOMOVT: value: 'i32* @external_global'
# RW-DEFAULT-NOMOVT: value: 'ptr @external_global'
# RWPI-NOMOVT: constants:
# RWPI-NOMOVT: id: 0
# RWPI-NOMOVT: value: 'external_global(SBREL)'
@ -97,7 +97,7 @@ registers:
- { id: 1, class: gprb }
# RO-DEFAULT-NOMOVT: constants:
# RO-DEFAULT-NOMOVT: id: 0
# RO-DEFAULT-NOMOVT: value: 'i32* @internal_constant'
# RO-DEFAULT-NOMOVT: value: 'ptr @internal_constant'
body: |
bb.0:
%0(p0) = G_GLOBAL_VALUE @internal_constant
@ -127,7 +127,7 @@ registers:
- { id: 1, class: gprb }
# RO-DEFAULT-NOMOVT: constants:
# RO-DEFAULT-NOMOVT: id: 0
# RO-DEFAULT-NOMOVT: value: 'i32* @external_constant'
# RO-DEFAULT-NOMOVT: value: 'ptr @external_constant'
body: |
bb.0:
%0(p0) = G_GLOBAL_VALUE @external_constant


@ -21,7 +21,7 @@ registers:
- { id: 1, class: gprb }
# ELF-NOMOVT: constants:
# ELF-NOMOVT: id: 0
# ELF-NOMOVT: value: 'i32* @internal_global'
# ELF-NOMOVT: value: 'ptr @internal_global'
body: |
bb.0:
%0(p0) = G_GLOBAL_VALUE @internal_global
@ -51,7 +51,7 @@ registers:
- { id: 1, class: gprb }
# ELF-NOMOVT: constants:
# ELF-NOMOVT: id: 0
# ELF-NOMOVT: value: 'i32* @external_global'
# ELF-NOMOVT: value: 'ptr @external_global'
body: |
bb.0:
%0(p0) = G_GLOBAL_VALUE @external_global


@ -29,7 +29,7 @@ registers:
- { id: 1, class: gprb }
# RW-DEFAULT-NOMOVT: constants:
# RW-DEFAULT-NOMOVT: id: 0
# RW-DEFAULT-NOMOVT: value: 'i32* @internal_global'
# RW-DEFAULT-NOMOVT: value: 'ptr @internal_global'
# RWPI-NOMOVT: constants:
# RWPI-NOMOVT: id: 0
# RWPI-NOMOVT: value: 'internal_global(SBREL)'
@ -63,7 +63,7 @@ registers:
- { id: 1, class: gprb }
# RW-DEFAULT-NOMOVT: constants:
# RW-DEFAULT-NOMOVT: id: 0
# RW-DEFAULT-NOMOVT: value: 'i32* @external_global'
# RW-DEFAULT-NOMOVT: value: 'ptr @external_global'
# RWPI-NOMOVT: constants:
# RWPI-NOMOVT: id: 0
# RWPI-NOMOVT: value: 'external_global(SBREL)'
@ -97,7 +97,7 @@ registers:
- { id: 1, class: gprb }
# RO-DEFAULT-NOMOVT: constants:
# RO-DEFAULT-NOMOVT: id: 0
# RO-DEFAULT-NOMOVT: value: 'i32* @internal_constant'
# RO-DEFAULT-NOMOVT: value: 'ptr @internal_constant'
body: |
bb.0:
%0(p0) = G_GLOBAL_VALUE @internal_constant
@ -127,7 +127,7 @@ registers:
- { id: 1, class: gprb }
# RO-DEFAULT-NOMOVT: constants:
# RO-DEFAULT-NOMOVT: id: 0
# RO-DEFAULT-NOMOVT: value: 'i32* @external_constant'
# RO-DEFAULT-NOMOVT: value: 'ptr @external_constant'
body: |
bb.0:
%0(p0) = G_GLOBAL_VALUE @external_constant


@ -21,7 +21,7 @@ registers:
- { id: 1, class: gprb }
# ELF-NOMOVT: constants:
# ELF-NOMOVT: id: 0
# ELF-NOMOVT: value: 'i32* @internal_global'
# ELF-NOMOVT: value: 'ptr @internal_global'
body: |
bb.0:
%0(p0) = G_GLOBAL_VALUE @internal_global
@ -51,7 +51,7 @@ registers:
- { id: 1, class: gprb }
# ELF-NOMOVT: constants:
# ELF-NOMOVT: id: 0
# ELF-NOMOVT: value: 'i32* @external_global'
# ELF-NOMOVT: value: 'ptr @external_global'
body: |
bb.0:
%0(p0) = G_GLOBAL_VALUE @external_global


@ -6,23 +6,23 @@ body: |
bb.0:
; CHECK-LABEL: name: aligned_memoperands
; CHECK: [[DEF:%[0-9]+]]:_(p0) = IMPLICIT_DEF
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef`)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef`, align 2)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef`)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef`, align 8)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef` + 12, align 2)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef` + 12)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef` + 12, align 2)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef` + 12)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef` + 12, basealign 8)
; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef`)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef`, align 2)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef`)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef`, align 8)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef` + 12, align 2)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef` + 12)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef` + 12, align 2)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef` + 12)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef` + 12, basealign 8)
%0:_(p0) = IMPLICIT_DEF
%1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef`)
%1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef`, align 2)
%1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef`, align 4) ; redundant
%1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef`, align 8)
%1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef` + 12, align 2)
%1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef` + 12, align 4) ; redundant
%1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef` + 12, basealign 2) ; printed as "align"
%1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef` + 12, basealign 4) ; redundant
%1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef` + 12, basealign 8)
%1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef`)
%1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef`, align 2)
%1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef`, align 4) ; redundant
%1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef`, align 8)
%1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef` + 12, align 2)
%1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef` + 12, align 4) ; redundant
%1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef` + 12, basealign 2) ; printed as "align"
%1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef` + 12, basealign 4) ; redundant
%1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef` + 12, basealign 8)
...


@ -21,7 +21,7 @@ entry:
; MIPS-DAG: t{{[0-9]+}}: i32 = ADDiu Register:i32 $zero, TargetConstant:i32<1>
; MIPS-DAG: t{{[0-9]+}}: i32 = ADDiu Register:i32 $zero, TargetConstant:i32<2048>
; MIPS-DAG: t{{[0-9]+}}: i32 = LUi TargetConstant:i32<128>
; MIPS: t{{[0-9]+}}: ch,glue = JAL TargetGlobalAddress:i32<void (i32, i32, i32)* @f>
; MIPS: t{{[0-9]+}}: ch,glue = JAL TargetGlobalAddress:i32<ptr @f>
; MIPS: t[[A:[0-9]+]]: i32 = LUi TargetConstant:i32<2304>
; MIPS: t{{[0-9]+}}: i32 = ORi t[[A]], TargetConstant:i32<2>
@ -30,7 +30,7 @@ entry:
; MM-DAG: t{{[0-9]+}}: i32 = LI16_MM TargetConstant:i32<1>
; MM-DAG: t{{[0-9]+}}: i32 = ADDiu_MM Register:i32 $zero, TargetConstant:i32<2048>
; MM-DAG: t{{[0-9]+}}: i32 = LUi_MM TargetConstant:i32<128>
; MM: t{{[0-9]+}}: ch,glue = JAL_MM TargetGlobalAddress:i32<void (i32, i32, i32)* @f>
; MM: t{{[0-9]+}}: ch,glue = JAL_MM TargetGlobalAddress:i32<ptr @f>
; MM: t[[A:[0-9]+]]: i32 = LUi_MM TargetConstant:i32<2304>
; MM: t{{[0-9]+}}: i32 = ORi_MM t[[A]], TargetConstant:i32<2>


@ -577,13 +577,13 @@ define double @fcmp_nnan(double %a, double %y, double %z) {
; FP library calls can have fast-math-flags.
; FMFDEBUG-LABEL: Optimized lowered selection DAG: %bb.0 'log2_approx:'
; FMFDEBUG: ch,glue = PPCISD::CALL_NOP t11, TargetGlobalAddress:i64<double (double)* @log2>
; FMFDEBUG: ch,glue = PPCISD::CALL_NOP t11, TargetGlobalAddress:i64<ptr @log2>
; FMFDEBUG: ch,glue = callseq_end t15, TargetConstant:i64<32>, TargetConstant:i64<0>, t15:1
; FMFDEBUG: f64,ch,glue = CopyFromReg t16, Register:f64 $f1, t16:1
; FMFDEBUG: Type-legalized selection DAG: %bb.0 'log2_approx:'
; GLOBALDEBUG-LABEL: Optimized lowered selection DAG: %bb.0 'log2_approx:'
; GLOBALDEBUG: ch,glue = PPCISD::CALL_NOP t11, TargetGlobalAddress:i64<double (double)* @log2>
; GLOBALDEBUG: ch,glue = PPCISD::CALL_NOP t11, TargetGlobalAddress:i64<ptr @log2>
; GLOBALDEBUG: ch,glue = callseq_end t15, TargetConstant:i64<32>, TargetConstant:i64<0>, t15:1
; GLOBALDEBUG: f64,ch,glue = CopyFromReg t16, Register:f64 $f1, t16:1
; GLOBALDEBUG: Type-legalized selection DAG: %bb.0 'log2_approx:'


@ -35,7 +35,7 @@ body: |
; CHECK: J %bb.4
; CHECK: bb.2:
; CHECK: successors:
; CHECK: STMux %20.subreg_l32, undef %8:addr64bit, 0, $noreg :: (store (s32) into `i32* undef`)
; CHECK: STMux %20.subreg_l32, undef %8:addr64bit, 0, $noreg :: (store (s32) into `ptr undef`)
; CHECK: bb.3:
; CHECK: successors:
; CHECK: bb.4:
@ -84,7 +84,7 @@ body: |
bb.2:
successors:
STMux killed %4, undef %22:addr64bit, 0, $noreg :: (store (s32) into `i32* undef`)
STMux killed %4, undef %22:addr64bit, 0, $noreg :: (store (s32) into `ptr undef`)
bb.3:
successors:


@ -11,9 +11,9 @@ body: |
bb.0:
; CHECK-LABEL: name: foo
; CHECK: renamable $eax = IMPLICIT_DEF
; CHECK: renamable $edx = MOVZX32rm8 renamable $eax, 1, $noreg, 0, $noreg :: (load (s8) from `i168* undef` + 20, align 4, basealign 16)
; CHECK: dead renamable $ecx = MOV32rm renamable $eax, 1, $noreg, 0, $noreg :: (load (s32) from `i168* undef` + 12, basealign 16)
; CHECK: renamable $al = MOV8rm killed renamable $eax, 1, $noreg, 0, $noreg :: (load (s8) from `i32* undef`, align 4)
; CHECK: renamable $edx = MOVZX32rm8 renamable $eax, 1, $noreg, 0, $noreg :: (load (s8) from `ptr undef` + 20, align 4, basealign 16)
; CHECK: dead renamable $ecx = MOV32rm renamable $eax, 1, $noreg, 0, $noreg :: (load (s32) from `ptr undef` + 12, basealign 16)
; CHECK: renamable $al = MOV8rm killed renamable $eax, 1, $noreg, 0, $noreg :: (load (s8) from `ptr undef`, align 4)
; CHECK: dead renamable $ecx = COPY renamable $edx
; CHECK: dead renamable $ecx = COPY renamable $edx
; CHECK: dead renamable $ecx = COPY renamable $edx
@ -26,9 +26,9 @@ body: |
; CHECK: dead renamable $eax = SHRD32rrCL renamable $eax, killed renamable $edx, implicit-def dead $eflags, implicit killed $cl
; CHECK: RET32
%0:gr32 = IMPLICIT_DEF
%1:gr32 = MOVZX32rm8 %0, 1, $noreg, 0, $noreg :: (load (s8) from `i168* undef` + 20, align 4, basealign 16)
%2:gr32 = MOV32rm %0, 1, $noreg, 0, $noreg :: (load (s32) from `i168* undef` + 12, basealign 16)
%3:gr8 = MOV8rm %0, 1, $noreg, 0, $noreg :: (load (s8) from `i32* undef`, align 4)
%1:gr32 = MOVZX32rm8 %0, 1, $noreg, 0, $noreg :: (load (s8) from `ptr undef` + 20, align 4, basealign 16)
%2:gr32 = MOV32rm %0, 1, $noreg, 0, $noreg :: (load (s32) from `ptr undef` + 12, basealign 16)
%3:gr8 = MOV8rm %0, 1, $noreg, 0, $noreg :: (load (s8) from `ptr undef`, align 4)
%4:gr32 = COPY %1
%5:gr32 = COPY %1
%6:gr32 = COPY %1


@ -6,7 +6,7 @@ define void @f() {
}
; CHECK-NOT: @llvm.global_dtors
; CHECK: @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 1, void ()* @asan.module_ctor, i8* bitcast (void ()* @asan.module_ctor to i8*) }]
; CHECK: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @asan.module_ctor, ptr @asan.module_ctor }]
; CHECK-NOT: @llvm.global_dtors
; CHECK: define internal void @asan.module_ctor() #[[#]] comdat
; CHECK-NOT: @llvm.global_dtors


@ -9,15 +9,15 @@ target triple = "x86_64-unknown-linux-gnu"
@c = internal global [2 x i32] zeroinitializer, align 4
@d = unnamed_addr global [2 x i32] zeroinitializer, align 4
; NOALIAS: @__asan_global_a = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint ({ [2 x i32], [24 x i8] }* @a to i64), i64 8, i64 32, i64 ptrtoint ([2 x i8]* @___asan_gen_.1 to i64), i64 ptrtoint ([8 x i8]* @___asan_gen_ to i64), i64 0, i64 0, i64 0 }
; NOALIAS-NEXT: @__asan_global_b = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint ({ [2 x i32], [24 x i8] }* @b to i64), i64 8, i64 32, i64 ptrtoint ([2 x i8]* @___asan_gen_.2 to i64), i64 ptrtoint ([8 x i8]* @___asan_gen_ to i64), i64 0, i64 0, i64 -1 }
; NOALIAS-NEXT: @__asan_global_c = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint ({ [2 x i32], [24 x i8] }* @c to i64), i64 8, i64 32, i64 ptrtoint ([2 x i8]* @___asan_gen_.3 to i64), i64 ptrtoint ([8 x i8]* @___asan_gen_ to i64), i64 0, i64 0, i64 -1 }
; NOALIAS-NEXT: @__asan_global_d = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint ({ [2 x i32], [24 x i8] }* @d to i64), i64 8, i64 32, i64 ptrtoint ([2 x i8]* @___asan_gen_.4 to i64), i64 ptrtoint ([8 x i8]* @___asan_gen_ to i64), i64 0, i64 0, i64 0 }
; NOALIAS: @__asan_global_a = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint (ptr @a to i64), i64 8, i64 32, i64 ptrtoint (ptr @___asan_gen_.1 to i64), i64 ptrtoint (ptr @___asan_gen_ to i64), i64 0, i64 0, i64 0 }
; NOALIAS-NEXT: @__asan_global_b = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint (ptr @b to i64), i64 8, i64 32, i64 ptrtoint (ptr @___asan_gen_.2 to i64), i64 ptrtoint (ptr @___asan_gen_ to i64), i64 0, i64 0, i64 -1 }
; NOALIAS-NEXT: @__asan_global_c = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint (ptr @c to i64), i64 8, i64 32, i64 ptrtoint (ptr @___asan_gen_.3 to i64), i64 ptrtoint (ptr @___asan_gen_ to i64), i64 0, i64 0, i64 -1 }
; NOALIAS-NEXT: @__asan_global_d = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint (ptr @d to i64), i64 8, i64 32, i64 ptrtoint (ptr @___asan_gen_.4 to i64), i64 ptrtoint (ptr @___asan_gen_ to i64), i64 0, i64 0, i64 0 }
; ALIAS: @__asan_global_a = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint ({ [2 x i32], [24 x i8] }* @0 to i64), i64 8, i64 32, i64 ptrtoint ([2 x i8]* @___asan_gen_.1 to i64), i64 ptrtoint ([8 x i8]* @___asan_gen_ to i64), i64 0, i64 0, i64 0 }
; ALIAS-NEXT: @__asan_global_b = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint ({ [2 x i32], [24 x i8] }* @1 to i64), i64 8, i64 32, i64 ptrtoint ([2 x i8]* @___asan_gen_.2 to i64), i64 ptrtoint ([8 x i8]* @___asan_gen_ to i64), i64 0, i64 0, i64 -1 }
; ALIAS-NEXT: @__asan_global_c = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint ({ [2 x i32], [24 x i8] }* @2 to i64), i64 8, i64 32, i64 ptrtoint ([2 x i8]* @___asan_gen_.3 to i64), i64 ptrtoint ([8 x i8]* @___asan_gen_ to i64), i64 0, i64 0, i64 -1 }
; ALIAS-NEXT: @__asan_global_d = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint ({ [2 x i32], [24 x i8] }* @3 to i64), i64 8, i64 32, i64 ptrtoint ([2 x i8]* @___asan_gen_.4 to i64), i64 ptrtoint ([8 x i8]* @___asan_gen_ to i64), i64 0, i64 0, i64 0 }
; ALIAS: @__asan_global_a = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint (ptr @0 to i64), i64 8, i64 32, i64 ptrtoint (ptr @___asan_gen_.1 to i64), i64 ptrtoint (ptr @___asan_gen_ to i64), i64 0, i64 0, i64 0 }
; ALIAS-NEXT: @__asan_global_b = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint (ptr @1 to i64), i64 8, i64 32, i64 ptrtoint (ptr @___asan_gen_.2 to i64), i64 ptrtoint (ptr @___asan_gen_ to i64), i64 0, i64 0, i64 -1 }
; ALIAS-NEXT: @__asan_global_c = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint (ptr @2 to i64), i64 8, i64 32, i64 ptrtoint (ptr @___asan_gen_.3 to i64), i64 ptrtoint (ptr @___asan_gen_ to i64), i64 0, i64 0, i64 -1 }
; ALIAS-NEXT: @__asan_global_d = private global { i64, i64, i64, i64, i64, i64, i64, i64 } { i64 ptrtoint (ptr @3 to i64), i64 8, i64 32, i64 ptrtoint (ptr @___asan_gen_.4 to i64), i64 ptrtoint (ptr @___asan_gen_ to i64), i64 0, i64 0, i64 0 }
; ALIAS: @0 = private alias {{.*}} @a
; ALIAS-NEXT: @1 = private alias {{.*}} @b
; ALIAS-NEXT: @2 = private alias {{.*}} @c


@ -7,11 +7,11 @@ target triple = "x86_64-unknown-linux-gnu"
define i8 @add(i8 %a, i8 %b) {
; CHECK: @add.dfsan
; CHECK-DAG: %[[#ALABEL:]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[ARGTLSTYPE:\[100 x i64\]]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN:2]]
; CHECK-DAG: %[[#BLABEL:]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[ARGTLSTYPE]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN]]
; CHECK-DAG: %[[#ALABEL:]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN:2]]
; CHECK-DAG: %[[#BLABEL:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
; CHECK: %[[#UNION:]] = or i[[#SBITS]] %[[#ALABEL]], %[[#BLABEL]]
; CHECK: %c = add i8 %a, %b
; CHECK: store i[[#SBITS]] %[[#UNION]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
; CHECK: store i[[#SBITS]] %[[#UNION]], ptr @__dfsan_retval_tls, align [[ALIGN]]
; CHECK: ret i8 %c
%c = add i8 %a, %b
ret i8 %c


@ -30,8 +30,8 @@ i1 %a190, i1 %a191, i1 %a192, i1 %a193, i1 %a194, i1 %a195, i1 %a196, i1 %a197,
i1 %a200
) {
; CHECK: @arg_overflow.dfsan
; CHECK: [[A199:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 199), align 4
; CHECK: store i32 [[A199]], i32* @__dfsan_retval_origin_tls, align 4
; CHECK: [[A199:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 199), align 4
; CHECK: store i32 [[A199]], ptr @__dfsan_retval_origin_tls, align 4
%r = add i1 %a199, %a200
ret i1 %r
@ -39,12 +39,12 @@ i1 %a200
define i1 @param_overflow(i1 %a) {
; CHECK: @param_overflow.dfsan
; CHECK: store i32 %1, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 199), align 4
; CHECK-NEXT: store i[[#SBITS]] %2, i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 398) to i[[#SBITS]]*), align 2
; CHECK-NEXT: store i[[#SBITS]] %2, i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 400) to i[[#SBITS]]*), align 2
; CHECK: store i32 %1, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 199), align 4
; CHECK-NEXT: store i[[#SBITS]] %2, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 398) to ptr), align 2
; CHECK-NEXT: store i[[#SBITS]] %2, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 400) to ptr), align 2
; CHECK-NEXT: %r = call i1 @arg_overflow.dfsan
; CHECK: %_dfsret_o = load i32, i32* @__dfsan_retval_origin_tls, align 4
; CHECK: store i32 %_dfsret_o, i32* @__dfsan_retval_origin_tls, align 4
; CHECK: %_dfsret_o = load i32, ptr @__dfsan_retval_origin_tls, align 4
; CHECK: store i32 %_dfsret_o, ptr @__dfsan_retval_origin_tls, align 4
%r = call i1 @arg_overflow(
i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a, i1 %a,
@ -76,7 +76,7 @@ declare void @foo(i1 %a)
define void @param_with_zero_shadow() {
; CHECK: @param_with_zero_shadow.dfsan
; CHECK-NEXT: store i[[#SBITS]] 0, i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_arg_tls to i[[#SBITS]]*), align 2
; CHECK-NEXT: store i[[#SBITS]] 0, ptr @__dfsan_arg_tls, align 2
; CHECK-NEXT: call void @foo.dfsan(i1 true)
call void @foo(i1 1)


@ -9,10 +9,10 @@ target triple = "x86_64-unknown-linux-gnu"
define i32 @phiop(i32 %a, i32 %b, i1 %c) {
; CHECK: @phiop.dfsan
; CHECK: entry:
; CHECK: [[BO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
; CHECK: [[AO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
; CHECK: [[BS:%.*]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN:2]]
; CHECK: [[AS:%.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
; CHECK: [[BO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
; CHECK: [[AO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
; CHECK: [[BS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN:2]]
; CHECK: [[AS:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
; CHECK: br i1 %c, label %next, label %done
; CHECK: next:
; CHECK: br i1 %c, label %T, label %F
@ -26,7 +26,7 @@ define i32 @phiop(i32 %a, i32 %b, i1 %c) {
; CHECK: br label %done
; CHECK: done:
; CHECK: [[PO:%.*]] = phi i32 [ [[BAO_T]], %T ], [ [[BAO_F]], %F ], [ [[AO]], %entry ]
; CHECK: store i32 [[PO]], i32* @__dfsan_retval_origin_tls, align 4
; CHECK: store i32 [[PO]], ptr @__dfsan_retval_origin_tls, align 4
entry:
br i1 %c, label %next, label %done


@ -10,20 +10,20 @@ target triple = "x86_64-unknown-linux-gnu"
define i8 @select8(i1 %c, i8 %t, i8 %f) {
; TRACK_CONTROL_FLOW: @select8.dfsan
; TRACK_CONTROL_FLOW: [[CO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
; TRACK_CONTROL_FLOW: [[FO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
; TRACK_CONTROL_FLOW: [[TO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
; TRACK_CONTROL_FLOW: [[CS:%.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align 2
; TRACK_CONTROL_FLOW: [[CO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
; TRACK_CONTROL_FLOW: [[FO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
; TRACK_CONTROL_FLOW: [[TO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
; TRACK_CONTROL_FLOW: [[CS:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align 2
; TRACK_CONTROL_FLOW: [[TFO:%.*]] = select i1 %c, i32 [[TO]], i32 [[FO]]
; TRACK_CONTROL_FLOW: [[CS_NE:%.*]] = icmp ne i[[#SBITS]] [[CS]], 0
; TRACK_CONTROL_FLOW: [[CTFO:%.*]] = select i1 [[CS_NE]], i32 [[CO]], i32 [[TFO]]
; TRACK_CONTROL_FLOW: store i32 [[CTFO]], i32* @__dfsan_retval_origin_tls, align 4
; TRACK_CONTROL_FLOW: store i32 [[CTFO]], ptr @__dfsan_retval_origin_tls, align 4
; NO_TRACK_CONTROL_FLOW: @select8.dfsan
; NO_TRACK_CONTROL_FLOW: [[FO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
; NO_TRACK_CONTROL_FLOW: [[TO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
; NO_TRACK_CONTROL_FLOW: [[FO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
; NO_TRACK_CONTROL_FLOW: [[TO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
; NO_TRACK_CONTROL_FLOW: [[TFO:%.*]] = select i1 %c, i32 [[TO]], i32 [[FO]]
; NO_TRACK_CONTROL_FLOW: store i32 [[TFO]], i32* @__dfsan_retval_origin_tls, align 4
; NO_TRACK_CONTROL_FLOW: store i32 [[TFO]], ptr @__dfsan_retval_origin_tls, align 4
%a = select i1 %c, i8 %t, i8 %f
ret i8 %a
@ -31,16 +31,16 @@ define i8 @select8(i1 %c, i8 %t, i8 %f) {
define i8 @select8e(i1 %c, i8 %tf) {
; TRACK_CONTROL_FLOW: @select8e.dfsan
; TRACK_CONTROL_FLOW: [[CO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
; TRACK_CONTROL_FLOW: [[TFO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
; TRACK_CONTROL_FLOW: [[CS:%.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align 2
; TRACK_CONTROL_FLOW: [[CO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
; TRACK_CONTROL_FLOW: [[TFO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
; TRACK_CONTROL_FLOW: [[CS:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align 2
; TRACK_CONTROL_FLOW: [[CS_NE:%.*]] = icmp ne i[[#SBITS]] [[CS]], 0
; TRACK_CONTROL_FLOW: [[CTFO:%.*]] = select i1 [[CS_NE]], i32 [[CO]], i32 [[TFO]]
; TRACK_CONTROL_FLOW: store i32 [[CTFO]], i32* @__dfsan_retval_origin_tls, align 4
; TRACK_CONTROL_FLOW: store i32 [[CTFO]], ptr @__dfsan_retval_origin_tls, align 4
; NO_TRACK_CONTROL_FLOW: @select8e.dfsan
; NO_TRACK_CONTROL_FLOW: [[TFO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
; NO_TRACK_CONTROL_FLOW: store i32 [[TFO]], i32* @__dfsan_retval_origin_tls, align 4
; NO_TRACK_CONTROL_FLOW: [[TFO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
; NO_TRACK_CONTROL_FLOW: store i32 [[TFO]], ptr @__dfsan_retval_origin_tls, align 4
%a = select i1 %c, i8 %tf, i8 %tf
ret i8 %a
@ -48,24 +48,24 @@ define i8 @select8e(i1 %c, i8 %tf) {
define <4 x i8> @select8v(<4 x i1> %c, <4 x i8> %t, <4 x i8> %f) {
; TRACK_CONTROL_FLOW: @select8v.dfsan
; TRACK_CONTROL_FLOW: [[CO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align 4
; TRACK_CONTROL_FLOW: [[FO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
; TRACK_CONTROL_FLOW: [[TO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
; TRACK_CONTROL_FLOW: [[FS:%.*]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 4) to i[[#SBITS]]*), align 2
; TRACK_CONTROL_FLOW: [[CS:%.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align 2
; TRACK_CONTROL_FLOW: [[CO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
; TRACK_CONTROL_FLOW: [[FO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
; TRACK_CONTROL_FLOW: [[TO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
; TRACK_CONTROL_FLOW: [[FS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align 2
; TRACK_CONTROL_FLOW: [[CS:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align 2
; TRACK_CONTROL_FLOW: [[FS_NE:%.*]] = icmp ne i[[#SBITS]] [[FS]], 0
; TRACK_CONTROL_FLOW: [[FTO:%.*]] = select i1 [[FS_NE]], i32 [[FO]], i32 [[TO]]
; TRACK_CONTROL_FLOW: [[CS_NE:%.*]] = icmp ne i[[#SBITS]] [[CS]], 0
; TRACK_CONTROL_FLOW: [[CFTO:%.*]] = select i1 [[CS_NE]], i32 [[CO]], i32 [[FTO]]
; TRACK_CONTROL_FLOW: store i32 [[CFTO]], i32* @__dfsan_retval_origin_tls, align 4
; TRACK_CONTROL_FLOW: store i32 [[CFTO]], ptr @__dfsan_retval_origin_tls, align 4
; NO_TRACK_CONTROL_FLOW: @select8v.dfsan
; NO_TRACK_CONTROL_FLOW: [[FO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
; NO_TRACK_CONTROL_FLOW: [[TO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
; NO_TRACK_CONTROL_FLOW: [[FS:%.*]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 4) to i[[#SBITS]]*), align 2
; NO_TRACK_CONTROL_FLOW: [[FO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
; NO_TRACK_CONTROL_FLOW: [[TO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
; NO_TRACK_CONTROL_FLOW: [[FS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align 2
; NO_TRACK_CONTROL_FLOW: [[FS_NE:%.*]] = icmp ne i[[#SBITS]] [[FS]], 0
; NO_TRACK_CONTROL_FLOW: [[FTO:%.*]] = select i1 [[FS_NE]], i32 [[FO]], i32 [[TO]]
; NO_TRACK_CONTROL_FLOW: store i32 [[FTO]], i32* @__dfsan_retval_origin_tls, align 4
; NO_TRACK_CONTROL_FLOW: store i32 [[FTO]], ptr @__dfsan_retval_origin_tls, align 4
%a = select <4 x i1> %c, <4 x i8> %t, <4 x i8> %f
ret <4 x i8> %a


@ -6,11 +6,11 @@ target triple = "x86_64-unknown-linux-gnu"
; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
define {i32, i32} @test({i32, i32} %a, i1 %c) {
; CHECK: %[[#AL:]] = load { i[[#SBITS]], i[[#SBITS]] }, { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_arg_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN:2]]
; CHECK: %[[#AL:]] = load { i[[#SBITS]], i[[#SBITS]] }, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
; CHECK: %[[#AL0:]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } %[[#AL]], i[[#SBITS]] 0, 0
; CHECK: %[[#AL1:]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } %[[#AL]], i[[#SBITS]] 0, 1
; CHECK: %[[#PL:]] = phi { i[[#SBITS]], i[[#SBITS]] } [ %[[#AL0]], %T ], [ %[[#AL1]], %F ]
; CHECK: store { i[[#SBITS]], i[[#SBITS]] } %[[#PL]], { i[[#SBITS]], i[[#SBITS]] }* bitcast ([100 x i64]* @__dfsan_retval_tls to { i[[#SBITS]], i[[#SBITS]] }*), align [[ALIGN]]
; CHECK: store { i[[#SBITS]], i[[#SBITS]] } %[[#PL]], ptr @__dfsan_retval_tls, align [[ALIGN]]
entry:
br i1 %c, label %T, label %F


@ -10,22 +10,22 @@ target triple = "x86_64-unknown-linux-gnu"
define i8 @select8(i1 %c, i8 %t, i8 %f) {
; TRACK_CF: @select8.dfsan
; TRACK_CF: %[[#R:]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 4) to i[[#SBITS]]*), align [[ALIGN:2]]
; TRACK_CF: %[[#R+1]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN]]
; TRACK_CF: %[[#R+2]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
; TRACK_CF: %[[#R:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align [[ALIGN:2]]
; TRACK_CF: %[[#R+1]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
; TRACK_CF: %[[#R+2]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
; TRACK_CF: %[[#R+3]] = select i1 %c, i[[#SBITS]] %[[#R+1]], i[[#SBITS]] %[[#R]]
; TRACK_CF: %[[#RO:]] = or i[[#SBITS]] %[[#R+2]], %[[#R+3]]
; TRACK_CF: %a = select i1 %c, i8 %t, i8 %f
; TRACK_CF: store i[[#SBITS]] %[[#RO]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
; TRACK_CF: store i[[#SBITS]] %[[#RO]], ptr @__dfsan_retval_tls, align [[ALIGN]]
; TRACK_CF: ret i8 %a
; NO_TRACK_CF: @select8.dfsan
; NO_TRACK_CF: %[[#R:]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 4) to i[[#SBITS]]*), align [[ALIGN:2]]
; NO_TRACK_CF: %[[#R+1]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN]]
; NO_TRACK_CF: %[[#R+2]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
; NO_TRACK_CF: %[[#R:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align [[ALIGN:2]]
; NO_TRACK_CF: %[[#R+1]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
; NO_TRACK_CF: %[[#R+2]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
; NO_TRACK_CF: %[[#R+3]] = select i1 %c, i[[#SBITS]] %[[#R+1]], i[[#SBITS]] %[[#R]]
; NO_TRACK_CF: %a = select i1 %c, i8 %t, i8 %f
; NO_TRACK_CF: store i[[#SBITS]] %[[#R+3]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
; NO_TRACK_CF: store i[[#SBITS]] %[[#R+3]], ptr @__dfsan_retval_tls, align [[ALIGN]]
; NO_TRACK_CF: ret i8 %a
%a = select i1 %c, i8 %t, i8 %f
@ -34,18 +34,18 @@ define i8 @select8(i1 %c, i8 %t, i8 %f) {
define i8 @select8e(i1 %c, i8 %tf) {
; TRACK_CF: @select8e.dfsan
; TRACK_CF: %[[#R:]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN]]
; TRACK_CF: %[[#R+1]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
; TRACK_CF: %[[#R:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
; TRACK_CF: %[[#R+1]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
; TRACK_CF: %[[#RO:]] = or i[[#SBITS]] %[[#R+1]], %[[#R]]
; TRACK_CF: %a = select i1 %c, i8 %tf, i8 %tf
; TRACK_CF: store i[[#SBITS]] %[[#RO]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
; TRACK_CF: store i[[#SBITS]] %[[#RO]], ptr @__dfsan_retval_tls, align [[ALIGN]]
; TRACK_CF: ret i8 %a
; NO_TRACK_CF: @select8e.dfsan
; NO_TRACK_CF: %[[#R:]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN]]
; NO_TRACK_CF: %[[#R+1]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
; NO_TRACK_CF: %[[#R:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
; NO_TRACK_CF: %[[#R+1]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
; NO_TRACK_CF: %a = select i1 %c, i8 %tf, i8 %tf
; NO_TRACK_CF: store i[[#SBITS]] %[[#R]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
; NO_TRACK_CF: store i[[#SBITS]] %[[#R]], ptr @__dfsan_retval_tls, align [[ALIGN]]
; NO_TRACK_CF: ret i8 %a
%a = select i1 %c, i8 %tf, i8 %tf
@ -54,22 +54,22 @@ define i8 @select8e(i1 %c, i8 %tf) {
define <4 x i8> @select8v(<4 x i1> %c, <4 x i8> %t, <4 x i8> %f) {
; TRACK_CF: @select8v.dfsan
; TRACK_CF: %[[#R:]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 4) to i[[#SBITS]]*), align [[ALIGN:2]]
; TRACK_CF: %[[#R+1]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN]]
; TRACK_CF: %[[#R+2]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
; TRACK_CF: %[[#R:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align [[ALIGN:2]]
; TRACK_CF: %[[#R+1]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
; TRACK_CF: %[[#R+2]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
; TRACK_CF: %[[#R+3]] = or i[[#SBITS]] %[[#R+1]], %[[#R]]
; TRACK_CF: %[[#RO:]] = or i[[#SBITS]] %[[#R+2]], %[[#R+3]]
; TRACK_CF: %a = select <4 x i1> %c, <4 x i8> %t, <4 x i8> %f
; TRACK_CF: store i[[#SBITS]] %[[#RO]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
; TRACK_CF: store i[[#SBITS]] %[[#RO]], ptr @__dfsan_retval_tls, align [[ALIGN]]
; TRACK_CF: ret <4 x i8> %a
; NO_TRACK_CF: @select8v.dfsan
; NO_TRACK_CF: %[[#R:]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 4) to i[[#SBITS]]*), align [[ALIGN:2]]
; NO_TRACK_CF: %[[#R+1]] = load i[[#SBITS]], i[[#SBITS]]* inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 2) to i[[#SBITS]]*), align [[ALIGN]]
; NO_TRACK_CF: %[[#R+2]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
; NO_TRACK_CF: %[[#R:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align [[ALIGN:2]]
; NO_TRACK_CF: %[[#R+1]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
; NO_TRACK_CF: %[[#R+2]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
; NO_TRACK_CF: %[[#RO:]] = or i[[#SBITS]] %[[#R+1]], %[[#R]]
; NO_TRACK_CF: %a = select <4 x i1> %c, <4 x i8> %t, <4 x i8> %f
; NO_TRACK_CF: store i[[#SBITS]] %[[#RO]], i[[#SBITS]]* bitcast ([100 x i64]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]
; NO_TRACK_CF: store i[[#SBITS]] %[[#RO]], ptr @__dfsan_retval_tls, align [[ALIGN]]
; NO_TRACK_CF: ret <4 x i8> %a
%a = select <4 x i1> %c, <4 x i8> %t, <4 x i8> %f


@ -9,7 +9,7 @@
define i32 @m() {
; CHECK-LABEL: @m.dfsan
; CHECK: %{{.*}} = call zeroext i16 @__dfsw_dfsan_get_label(i64 signext 56, i[[#SBITS]] zeroext 0, i[[#SBITS]]* %{{.*}})
; CHECK: %{{.*}} = call zeroext i16 @__dfsw_dfsan_get_label(i64 signext 56, i[[#SBITS]] zeroext 0, ptr %{{.*}})
entry:
%call = call zeroext i16 @dfsan_get_label(i64 signext 56)
@ -19,7 +19,7 @@ entry:
define i32 @k() {
; CHECK-LABEL: @k.dfsan
; CHECK: %{{.*}} = call zeroext i16 @__dfsw_k2(i64 signext 56, i64 signext 67, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]]* %{{.*}})
; CHECK: %{{.*}} = call zeroext i16 @__dfsw_k2(i64 signext 56, i64 signext 67, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]] zeroext {{.*}}, ptr %{{.*}})
entry:
%call = call zeroext i16 @k2(i64 signext 56, i64 signext 67)
@ -29,7 +29,7 @@ entry:
define i32 @k3() {
; CHECK-LABEL: @k3.dfsan
; CHECK: %{{.*}} = call zeroext i16 @__dfsw_k4(i64 signext 56, i64 signext 67, i64 signext 78, i64 signext 89, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]]* %{{.*}})
; CHECK: %{{.*}} = call zeroext i16 @__dfsw_k4(i64 signext 56, i64 signext 67, i64 signext 78, i64 signext 89, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]] zeroext {{.*}}, ptr %{{.*}})
entry:
%call = call zeroext i16 @k4(i64 signext 56, i64 signext 67, i64 signext 78, i64 signext 89)
@ -39,17 +39,17 @@ entry:
declare zeroext i16 @dfsan_get_label(i64 signext)
; CHECK-LABEL: @"dfsw$dfsan_get_label"
; CHECK: %{{.*}} = call i16 @__dfsw_dfsan_get_label(i64 %0, i[[#SBITS]] zeroext %1, i[[#SBITS]]* %{{.*}})
; CHECK: %{{.*}} = call i16 @__dfsw_dfsan_get_label(i64 %0, i[[#SBITS]] zeroext %1, ptr %{{.*}})
declare zeroext i16 @k2(i64 signext, i64 signext)
; CHECK-LABEL: @"dfsw$k2"
; CHECK: %{{.*}} = call i16 @__dfsw_k2(i64 %{{.*}}, i64 %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]]* %{{.*}})
; CHECK: %{{.*}} = call i16 @__dfsw_k2(i64 %{{.*}}, i64 %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, ptr %{{.*}})
declare zeroext i16 @k4(i64 signext, i64 signext, i64 signext, i64 signext)
; CHECK-LABEL: @"dfsw$k4"
; CHECK: %{{.*}} = call i16 @__dfsw_k4(i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]]* %{{.*}})
; CHECK: %{{.*}} = call i16 @__dfsw_k4(i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, ptr %{{.*}})
; CHECK: declare zeroext i16 @__dfsw_dfsan_get_label(i64 signext, i[[#SBITS]], i[[#SBITS]]*)
; CHECK: declare zeroext i16 @__dfsw_k2(i64 signext, i64 signext, i[[#SBITS]], i[[#SBITS]], i[[#SBITS]]*)
; CHECK: declare zeroext i16 @__dfsw_k4(i64 signext, i64 signext, i64 signext, i64 signext, i[[#SBITS]], i[[#SBITS]], i[[#SBITS]], i[[#SBITS]], i[[#SBITS]]*)
; CHECK: declare zeroext i16 @__dfsw_dfsan_get_label(i64 signext, i[[#SBITS]], ptr)
; CHECK: declare zeroext i16 @__dfsw_k2(i64 signext, i64 signext, i[[#SBITS]], i[[#SBITS]], ptr)
; CHECK: declare zeroext i16 @__dfsw_k4(i64 signext, i64 signext, i64 signext, i64 signext, i[[#SBITS]], i[[#SBITS]], i[[#SBITS]], i[[#SBITS]], ptr)


@ -3,28 +3,28 @@
; CHECK: @__start_hwasan_globals = external hidden constant [0 x i8]
; CHECK: @__stop_hwasan_globals = external hidden constant [0 x i8]
; CHECK: @hwasan.note = private constant { i32, i32, i32, [8 x i8], i32, i32 } { i32 8, i32 8, i32 3, [8 x i8] c"LLVM\00\00\00\00", i32 trunc (i64 sub (i64 ptrtoint ([0 x i8]* @__start_hwasan_globals to i64), i64 ptrtoint ({ i32, i32, i32, [8 x i8], i32, i32 }* @hwasan.note to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint ([0 x i8]* @__stop_hwasan_globals to i64), i64 ptrtoint ({ i32, i32, i32, [8 x i8], i32, i32 }* @hwasan.note to i64)) to i32) }, section ".note.hwasan.globals", comdat($hwasan.module_ctor), align 4
; CHECK: @hwasan.note = private constant { i32, i32, i32, [8 x i8], i32, i32 } { i32 8, i32 8, i32 3, [8 x i8] c"LLVM\00\00\00\00", i32 trunc (i64 sub (i64 ptrtoint (ptr @__start_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr @__stop_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32) }, section ".note.hwasan.globals", comdat($hwasan.module_ctor), align 4
; CHECK: @hwasan.dummy.global = private constant [0 x i8] zeroinitializer, section "hwasan_globals", comdat($hwasan.module_ctor), !associated [[NOTE:![0-9]+]]
; CHECK: @four.hwasan = private global { i32, [12 x i8] } { i32 1, [12 x i8] c"\00\00\00\00\00\00\00\00\00\00\00," }, align 16
; CHECK: @four.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint ({ i32, [12 x i8] }* @four.hwasan to i64), i64 ptrtoint ({ i32, i32 }* @four.hwasan.descriptor to i64)) to i32), i32 738197508 }, section "hwasan_globals", !associated [[FOUR:![0-9]+]]
; CHECK: @four.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint (ptr @four.hwasan to i64), i64 ptrtoint (ptr @four.hwasan.descriptor to i64)) to i32), i32 738197508 }, section "hwasan_globals", !associated [[FOUR:![0-9]+]]
; CHECK: @sixteen.hwasan = private global [16 x i8] zeroinitializer, align 16
; CHECK: @sixteen.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint ([16 x i8]* @sixteen.hwasan to i64), i64 ptrtoint ({ i32, i32 }* @sixteen.hwasan.descriptor to i64)) to i32), i32 754974736 }, section "hwasan_globals", !associated [[SIXTEEN:![0-9]+]]
; CHECK: @sixteen.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint (ptr @sixteen.hwasan to i64), i64 ptrtoint (ptr @sixteen.hwasan.descriptor to i64)) to i32), i32 754974736 }, section "hwasan_globals", !associated [[SIXTEEN:![0-9]+]]
; CHECK: @huge.hwasan = private global [16777232 x i8] zeroinitializer, align 16
; CHECK: @huge.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint ([16777232 x i8]* @huge.hwasan to i64), i64 ptrtoint ({ i32, i32 }* @huge.hwasan.descriptor to i64)) to i32), i32 788529136 }, section "hwasan_globals", !associated [[HUGE:![0-9]+]]
; CHECK: @huge.hwasan.descriptor.1 = private constant { i32, i32 } { i32 trunc (i64 add (i64 sub (i64 ptrtoint ([16777232 x i8]* @huge.hwasan to i64), i64 ptrtoint ({ i32, i32 }* @huge.hwasan.descriptor.1 to i64)), i64 16777200) to i32), i32 771751968 }, section "hwasan_globals", !associated [[HUGE]]
; CHECK: @huge.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint (ptr @huge.hwasan to i64), i64 ptrtoint (ptr @huge.hwasan.descriptor to i64)) to i32), i32 788529136 }, section "hwasan_globals", !associated [[HUGE:![0-9]+]]
; CHECK: @huge.hwasan.descriptor.1 = private constant { i32, i32 } { i32 trunc (i64 add (i64 sub (i64 ptrtoint (ptr @huge.hwasan to i64), i64 ptrtoint (ptr @huge.hwasan.descriptor.1 to i64)), i64 16777200) to i32), i32 771751968 }, section "hwasan_globals", !associated [[HUGE]]
; CHECK: @four = alias i32, inttoptr (i64 add (i64 ptrtoint ({ i32, [12 x i8] }* @four.hwasan to i64), i64 6341068275337658368) to i32*)
; CHECK: @sixteen = alias [16 x i8], inttoptr (i64 add (i64 ptrtoint ([16 x i8]* @sixteen.hwasan to i64), i64 6485183463413514240) to [16 x i8]*)
; CHECK: @huge = alias [16777232 x i8], inttoptr (i64 add (i64 ptrtoint ([16777232 x i8]* @huge.hwasan to i64), i64 6629298651489370112) to [16777232 x i8]*)
; CHECK: @four = alias i32, inttoptr (i64 add (i64 ptrtoint (ptr @four.hwasan to i64), i64 6341068275337658368) to ptr)
; CHECK: @sixteen = alias [16 x i8], inttoptr (i64 add (i64 ptrtoint (ptr @sixteen.hwasan to i64), i64 6485183463413514240) to ptr)
; CHECK: @huge = alias [16777232 x i8], inttoptr (i64 add (i64 ptrtoint (ptr @huge.hwasan to i64), i64 6629298651489370112) to ptr)
; CHECK: [[NOTE]] = !{{{{}} i32, i32, i32, [8 x i8], i32, i32 }* @hwasan.note}
; CHECK: [[FOUR]] = !{{{{}} i32, [12 x i8] }* @four.hwasan}
; CHECK: [[SIXTEEN]] = !{[16 x i8]* @sixteen.hwasan}
; CHECK: [[HUGE]] = !{[16777232 x i8]* @huge.hwasan}
; CHECK: [[NOTE]] = !{ptr @hwasan.note}
; CHECK: [[FOUR]] = !{ptr @four.hwasan}
; CHECK: [[SIXTEEN]] = !{ptr @sixteen.hwasan}
; CHECK: [[HUGE]] = !{ptr @huge.hwasan}
source_filename = "foo"


@ -12,13 +12,13 @@ define i32 @_Z1fv() {
}
; CHECK-LABEL: define i32 @_Z1fv
; CHECK: order_file_entry
; CHECK: %[[T1:.+]] = load i8, i8* getelementptr inbounds ([1 x i8], [1 x i8]* @bitmap_0, i32 0, i32 0
; CHECK: store i8 1, i8* getelementptr inbounds ([1 x i8], [1 x i8]* @bitmap_0, i32 0, i32 0)
; CHECK: %[[T1:.+]] = load i8, ptr @bitmap_0
; CHECK: store i8 1, ptr @bitmap_0
; CHECK: %[[T2:.+]] = icmp eq i8 %[[T1]], 0
; CHECK: br i1 %[[T2]], label %order_file_set, label
; CHECK: order_file_set
; CHECK: %[[T3:.+]] = atomicrmw add i32* @_llvm_order_file_buffer_idx, i32 1 seq_cst
; CHECK: %[[T3:.+]] = atomicrmw add ptr @_llvm_order_file_buffer_idx, i32 1 seq_cst
; CHECK: %[[T5:.+]] = and i32 %[[T3]], 131071
; CHECK: %[[T4:.+]] = getelementptr [131072 x i64], [131072 x i64]* @_llvm_order_file_buffer, i32 0, i32 %[[T5]]
; CHECK: store i64 {{.*}}, i64* %[[T4]]
; CHECK: %[[T4:.+]] = getelementptr [131072 x i64], ptr @_llvm_order_file_buffer, i32 0, i32 %[[T5]]
; CHECK: store i64 {{.*}}, ptr %[[T4]]


@ -4,36 +4,36 @@
; CHECK: @"__A8764FDD_x@c" = internal unnamed_addr global i8 1, section ".just.my.code", align 1, !dbg !5
; CHECK: define void @l1() !dbg !12 {
; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__7DF23CF5_x@c")
; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__7DF23CF5_x@c")
; CHECK: ret void
; CHECK: }
; CHECK: define void @l2() !dbg !16 {
; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__7DF23CF5_x@c")
; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__7DF23CF5_x@c")
; CHECK: ret void
; CHECK: }
; CHECK: define void @w1() !dbg !18 {
; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__A8764FDD_x@c")
; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__A8764FDD_x@c")
; CHECK: ret void
; CHECK: }
; CHECK: define void @w2() !dbg !19 {
; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__A8764FDD_x@c")
; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__A8764FDD_x@c")
; CHECK: ret void
; CHECK: }
; CHECK: define void @w3() !dbg !21 {
; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__A8764FDD_x@c")
; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__A8764FDD_x@c")
; CHECK: ret void
; CHECK: }
; CHECK: define void @w4() !dbg !23 {
; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__A8764FDD_x@c")
; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__A8764FDD_x@c")
; CHECK: ret void
; CHECK: }
; CHECK: define weak void @__CheckForDebuggerJustMyCode(i8* noundef %0) unnamed_addr {
; CHECK: define weak void @__CheckForDebuggerJustMyCode(ptr noundef %0) unnamed_addr {
; CHECK: ret void
; CHECK: }


@ -3,18 +3,18 @@
; CHECK: $_JustMyCode_Default = comdat any
; CHECK: @"_A8764FDD_x@c" = internal unnamed_addr global i8 1, section ".msvcjmc", align 1, !dbg !0
; CHECK: @llvm.used = appending global [1 x i8*] [i8* bitcast (void (i8*)* @_JustMyCode_Default to i8*)], section "llvm.metadata"
; CHECK: @llvm.used = appending global [1 x ptr] [ptr @_JustMyCode_Default], section "llvm.metadata"
; CHECK: define void @w1() #0 !dbg !10 {
; CHECK: call x86_fastcallcc void @__CheckForDebuggerJustMyCode(i8* inreg noundef @"_A8764FDD_x@c")
; CHECK: call x86_fastcallcc void @__CheckForDebuggerJustMyCode(ptr inreg noundef @"_A8764FDD_x@c")
; CHECK: ret void
; CHECK: }
; CHECK: define void @_JustMyCode_Default(i8* inreg noundef %0) unnamed_addr comdat {
; CHECK: define void @_JustMyCode_Default(ptr inreg noundef %0) unnamed_addr comdat {
; CHECK: ret void
; CHECK: }
; CHECK: declare x86_fastcallcc void @__CheckForDebuggerJustMyCode(i8* inreg noundef) unnamed_addr
; CHECK: declare x86_fastcallcc void @__CheckForDebuggerJustMyCode(ptr inreg noundef) unnamed_addr
; CHECK: !0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
; CHECK: !1 = distinct !DIGlobalVariable(name: "_A8764FDD_x@c", scope: !2, file: !3, type: !5, isLocal: true, isDefinition: true)


@ -5,61 +5,61 @@
; CHECK: $__JustMyCode_Default = comdat any
; CHECK: @"__7DF23CF5_x@c" = internal unnamed_addr global i8 1, section ".msvcjmc", align 1, !dbg !0
; CHECK: @llvm.used = appending global [1 x i8*] [i8* bitcast (void (i8*)* @__JustMyCode_Default to i8*)], section "llvm.metadata"
; CHECK: @llvm.used = appending global [1 x ptr] [ptr @__JustMyCode_Default], section "llvm.metadata"
; CHECK: @"__A8764FDD_x@c" = internal unnamed_addr global i8 1, section ".msvcjmc", align 1, !dbg !5
; CHECK: @"__0C712A50_x@c" = internal unnamed_addr global i8 1, section ".msvcjmc", align 1, !dbg !9
; CHECK: @"__A3605329_x@c" = internal unnamed_addr global i8 1, section ".msvcjmc", align 1, !dbg !12
; CHECK: define void @l1() !dbg !19 {
; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__7DF23CF5_x@c")
; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__7DF23CF5_x@c")
; CHECK: ret void
; CHECK: }
; CHECK: define void @l2() !dbg !23 {
; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__7DF23CF5_x@c")
; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__7DF23CF5_x@c")
; CHECK: ret void
; CHECK: }
; CHECK: define void @w1() !dbg !25 {
; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__A8764FDD_x@c")
; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__A8764FDD_x@c")
; CHECK: ret void
; CHECK: }
; CHECK: define void @w2() !dbg !26 {
; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__A8764FDD_x@c")
; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__A8764FDD_x@c")
; CHECK: ret void
; CHECK: }
; CHECK: define void @w3() !dbg !28 {
; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__A8764FDD_x@c")
; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__A8764FDD_x@c")
; CHECK: ret void
; CHECK: }
; CHECK: define void @w4() !dbg !30 {
; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__A8764FDD_x@c")
; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__A8764FDD_x@c")
; CHECK: ret void
; CHECK: }
; CHECK: define void @w5() !dbg !32 {
; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__0C712A50_x@c")
; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__0C712A50_x@c")
; CHECK: ret void
; CHECK: }
; CHECK: define void @w6() !dbg !33 {
; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__A3605329_x@c")
; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__A3605329_x@c")
; CHECK: ret void
; CHECK: }
; CHECK: define void @w7() !dbg !34 {
; CHECK: call void @__CheckForDebuggerJustMyCode(i8* noundef @"__0C712A50_x@c")
; CHECK: call void @__CheckForDebuggerJustMyCode(ptr noundef @"__0C712A50_x@c")
; CHECK: ret void
; CHECK: }
; CHECK: define void @__JustMyCode_Default(i8* noundef %0) unnamed_addr comdat {
; CHECK: define void @__JustMyCode_Default(ptr noundef %0) unnamed_addr comdat {
; CHECK: ret void
; CHECK: }
; CHECK: declare void @__CheckForDebuggerJustMyCode(i8* noundef) unnamed_addr
; CHECK: declare void @__CheckForDebuggerJustMyCode(ptr noundef) unnamed_addr
; CHECK: !llvm.linker.options = !{!18}


@ -52,73 +52,73 @@ attributes #1 = { sanitize_memory }
; CHECK-LABEL: @bar
; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
; CHECK: [[S:%.*]] = add i64 [[B]], 24
; CHECK: [[V:%.*]] = zext {{.*}}
; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
; CHECK: store {{.*}} [[V]], {{.*}} [[M]]
; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
; CHECK: [[S:%.*]] = add i64 [[B]], 36
; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i32*
; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
; CHECK: store {{.*}} [[M]]
; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
; CHECK: [[S:%.*]] = add i64 [[B]], 40
; CHECK: [[V:%.*]] = sext {{.*}}
; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
; CHECK: store {{.*}} [[V]], {{.*}} [[M]]
; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
; CHECK: [[S:%.*]] = add i64 [[B]], 48
; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
; CHECK: store {{.*}} [[M]]
; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
; CHECK: [[S:%.*]] = add i64 [[B]], 160
; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
; CHECK: store {{.*}} [[M]]
; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
; CHECK: [[S:%.*]] = add i64 [[B]], 168
; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
; CHECK: store {{.*}} [[M]]
; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
; CHECK: [[S:%.*]] = add i64 [[B]], 176
; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
; CHECK: store {{.*}} [[M]]
; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
; CHECK: [[S:%.*]] = add i64 [[B]], 184
; CHECK: [[V:%.*]] = zext {{.*}}
; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
; CHECK: store {{.*}} [[V]], {{.*}} [[M]]
; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
; CHECK: [[S:%.*]] = add i64 [[B]], 192
; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
; CHECK: store {{.*}} [[M]]
; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
; CHECK: [[S:%.*]] = add i64 [[B]], 204
; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i32*
; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
; CHECK: store {{.*}} [[M]]
; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
; CHECK: [[S:%.*]] = add i64 [[B]], 208
; CHECK: [[V:%.*]] = sext {{.*}}
; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
; CHECK: store {{.*}} [[V]], {{.*}} [[M]]
; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
; CHECK: [[S:%.*]] = add i64 [[B]], 216
; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
; CHECK: store {{.*}} [[M]]
; CHECK: [[B:%.*]] = ptrtoint [100 x i64]* %va_arg_shadow to i64
; CHECK: [[B:%.*]] = ptrtoint ptr %va_arg_shadow to i64
; CHECK: [[S:%.*]] = add i64 [[B]], 224
; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to i64*
; CHECK: [[M:%_msarg_va_s.*]] = inttoptr i64 [[S]] to ptr
; CHECK: store {{.*}} [[M]]
; CHECK: store {{.*}} 72, {{.*}} %va_arg_overflow_size


@ -29,6 +29,6 @@ entry:
; If the size of __msan_va_arg_tls changes the second argument of `add` must also be changed.
; CHECK-LABEL: @many_args
; CHECK: i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 792)
; CHECK-NOT: i64 add (i64 ptrtoint ([100 x i64]* @__msan_va_arg_tls to i64), i64 800)
; CHECK: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792)
; CHECK-NOT: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 800)
declare i64 @sum(i64 %n, ...)


@ -8,16 +8,16 @@ target triple = "x86_64-unknown-linux-gnu"
define <4 x i64> @test_mm256_abs_epi8(<4 x i64> %a) local_unnamed_addr #0 {
; CHECK-LABEL: @test_mm256_abs_epi8(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
; CHECK: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <32 x i8>
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[A:%.*]] to <32 x i8>
; CHECK-NEXT: [[TMP4:%.*]] = tail call <32 x i8> @llvm.abs.v32i8(<32 x i8> [[TMP3]], i1 false)
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <32 x i8> [[TMP2]] to <4 x i64>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast <32 x i8> [[TMP4]] to <4 x i64>
; CHECK-NEXT: store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
; ORIGIN-NEXT: store i32 [[TMP1]], i32* @__msan_retval_origin_tls, align 4
; CHECK-NEXT: store <4 x i64> [[TMP5]], ptr @__msan_retval_tls, align 8
; ORIGIN-NEXT: store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
; CHECK: ret <4 x i64> [[TMP6]]
;
entry:
@ -30,16 +30,16 @@ entry:
define <4 x i64> @test_mm256_abs_epi16(<4 x i64> %a) local_unnamed_addr #0 {
; CHECK-LABEL: @test_mm256_abs_epi16(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
; CHECK: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <16 x i16>
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[A:%.*]] to <16 x i16>
; CHECK-NEXT: [[TMP4:%.*]] = tail call <16 x i16> @llvm.abs.v16i16(<16 x i16> [[TMP3]], i1 false)
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i16> [[TMP2]] to <4 x i64>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast <16 x i16> [[TMP4]] to <4 x i64>
; CHECK-NEXT: store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
; ORIGIN-NEXT: store i32 [[TMP1]], i32* @__msan_retval_origin_tls, align 4
; CHECK-NEXT: store <4 x i64> [[TMP5]], ptr @__msan_retval_tls, align 8
; ORIGIN-NEXT: store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
; CHECK: ret <4 x i64> [[TMP6]]
;
entry:
@ -52,16 +52,16 @@ entry:
define <4 x i64> @test_mm256_abs_epi32(<4 x i64> %a) local_unnamed_addr #0 {
; CHECK-LABEL: @test_mm256_abs_epi32(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
; CHECK: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <8 x i32>
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[A:%.*]] to <8 x i32>
; CHECK-NEXT: [[TMP4:%.*]] = tail call <8 x i32> @llvm.abs.v8i32(<8 x i32> [[TMP3]], i1 false)
; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i32> [[TMP2]] to <4 x i64>
; CHECK-NEXT: [[TMP6:%.*]] = bitcast <8 x i32> [[TMP4]] to <4 x i64>
; CHECK-NEXT: store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
; ORIGIN-NEXT: store i32 [[TMP1]], i32* @__msan_retval_origin_tls, align 4
; CHECK-NEXT: store <4 x i64> [[TMP5]], ptr @__msan_retval_tls, align 8
; ORIGIN-NEXT: store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
; CHECK: ret <4 x i64> [[TMP6]]
;
entry:
@ -74,12 +74,12 @@ entry:
define <4 x double> @test_fabs(<4 x double> %a) local_unnamed_addr #0 {
; CHECK-LABEL: @test_fabs(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
; CHECK: call void @llvm.donothing()
; CHECK-NEXT: [[TMP2:%.*]] = tail call <4 x double> @llvm.fabs.v4f64(<4 x double> [[A:%.*]])
; CHECK-NEXT: store <4 x i64> [[TMP0]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
; ORIGIN-NEXT: store i32 [[TMP1]], i32* @__msan_retval_origin_tls, align 4
; CHECK-NEXT: store <4 x i64> [[TMP0]], ptr @__msan_retval_tls, align 8
; ORIGIN-NEXT: store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
; CHECK: ret <4 x double> [[TMP2]]
;
entry:


@ -15,11 +15,11 @@ entry:
}
; CHECK-LABEL: @InsertValue(
; CHECK-DAG: [[Sx:%.*]] = load i32, i32* {{.*}}@__msan_param_tls to i32*)
; CHECK-DAG: [[Sy:%.*]] = load i32, i32* {{.*}}@__msan_param_tls to i64), i64 8) to i32*)
; CHECK-DAG: [[Sx:%.*]] = load i32, ptr @__msan_param_tls
; CHECK-DAG: [[Sy:%.*]] = load i32, ptr {{.*}}@__msan_param_tls to i64), i64 8)
; CHECK: [[A:%.*]] = insertvalue [2 x i32] [i32 -1, i32 -1], i32 [[Sx]], 0
; CHECK: [[B:%.*]] = insertvalue [2 x i32] [[A]], i32 [[Sy]], 1
; CHECK: store [2 x i32] [[B]], [2 x i32]* {{.*}}@__msan_retval_tls
; CHECK: store [2 x i32] [[B]], ptr {{.*}}@__msan_retval_tls
; CHECK: ret [2 x i32]
@ -31,11 +31,11 @@ entry:
}
; CHECK-LABEL: @InsertValueDouble(
; CHECK-DAG: [[Sx:%.*]] = load i64, i64* getelementptr {{.*}}@__msan_param_tls, i32 0, i32 0
; CHECK-DAG: [[Sy:%.*]] = load i64, i64* {{.*}}@__msan_param_tls to i64), i64 8) to i64*)
; CHECK-DAG: [[Sx:%.*]] = load i64, ptr @__msan_param_tls
; CHECK-DAG: [[Sy:%.*]] = load i64, ptr {{.*}}@__msan_param_tls to i64), i64 8)
; CHECK: [[A:%.*]] = insertvalue [2 x i64] [i64 -1, i64 -1], i64 [[Sx]], 0
; CHECK: [[B:%.*]] = insertvalue [2 x i64] [[A]], i64 [[Sy]], 1
; CHECK: store [2 x i64] [[B]], [2 x i64]* {{.*}}@__msan_retval_tls
; CHECK: store [2 x i64] [[B]], ptr {{.*}}@__msan_retval_tls
; CHECK: ret [2 x double]
@ -46,9 +46,9 @@ entry:
}
; CHECK-LABEL: @ExtractValue(
; CHECK: [[Sa:%.*]] = load [2 x i32], [2 x i32]* {{.*}}@__msan_param_tls to [2 x i32]*)
; CHECK: [[Sa:%.*]] = load [2 x i32], ptr @__msan_param_tls
; CHECK: [[Sx:%.*]] = extractvalue [2 x i32] [[Sa]], 1
; CHECK: store i32 [[Sx]], i32* {{.*}}@__msan_retval_tls
; CHECK: store i32 [[Sx]], ptr @__msan_retval_tls
; CHECK: ret i32
@ -62,9 +62,9 @@ define i32 @ArrayInStruct(%MyStruct %s) sanitize_memory {
}
; CHECK-LABEL: @ArrayInStruct(
; CHECK: [[Ss:%.*]] = load { i32, i32, [3 x i32] }, { i32, i32, [3 x i32] }* {{.*}}@__msan_param_tls to { i32, i32, [3 x i32] }*)
; CHECK: [[Ss:%.*]] = load { i32, i32, [3 x i32] }, ptr @__msan_param_tls
; CHECK: [[Sx:%.*]] = extractvalue { i32, i32, [3 x i32] } [[Ss]], 2, 1
; CHECK: store i32 [[Sx]], i32* {{.*}}@__msan_retval_tls
; CHECK: store i32 [[Sx]], ptr @__msan_retval_tls
; CHECK: ret i32
@ -74,9 +74,9 @@ define i32 @ArrayOfStructs([3 x { i32, i32 }] %a) sanitize_memory {
}
; CHECK-LABEL: @ArrayOfStructs(
; CHECK: [[Ss:%.*]] = load [3 x { i32, i32 }], [3 x { i32, i32 }]* {{.*}}@__msan_param_tls to [3 x { i32, i32 }]*)
; CHECK: [[Ss:%.*]] = load [3 x { i32, i32 }], ptr @__msan_param_tls
; CHECK: [[Sx:%.*]] = extractvalue [3 x { i32, i32 }] [[Ss]], 2, 1
; CHECK: store i32 [[Sx]], i32* {{.*}}@__msan_retval_tls
; CHECK: store i32 [[Sx]], ptr @__msan_retval_tls
; CHECK: ret i32
@ -86,7 +86,7 @@ define <8 x i16> @ArrayOfVectors([3 x <8 x i16>] %a) sanitize_memory {
}
; CHECK-LABEL: @ArrayOfVectors(
; CHECK: [[Ss:%.*]] = load [3 x <8 x i16>], [3 x <8 x i16>]* {{.*}}@__msan_param_tls to [3 x <8 x i16>]*)
; CHECK: [[Ss:%.*]] = load [3 x <8 x i16>], ptr @__msan_param_tls
; CHECK: [[Sx:%.*]] = extractvalue [3 x <8 x i16>] [[Ss]], 1
; CHECK: store <8 x i16> [[Sx]], <8 x i16>* {{.*}}@__msan_retval_tls
; CHECK: store <8 x i16> [[Sx]], ptr @__msan_retval_tls
; CHECK: ret <8 x i16>


@ -22,7 +22,7 @@ entry:
}
; CHECK-LABEL: @Test_bzhi_32(
; CHECK-DAG: %[[SA:.*]] = load i32, {{.*}}@__msan_param_tls to i32*)
; CHECK-DAG: %[[SA:.*]] = load i32, ptr @__msan_param_tls
; CHECK-DAG: %[[SB:.*]] = load i32, {{.*}}@__msan_param_tls to i64), i64 8)
; CHECK-DAG: %[[SB0:.*]] = icmp ne i32 %[[SB]], 0
; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i32
@ -38,7 +38,7 @@ entry:
}
; CHECK-LABEL: @Test_bzhi_64(
; CHECK-DAG: %[[SA:.*]] = load i64, {{.*}}@__msan_param_tls, i32 0, i32 0
; CHECK-DAG: %[[SA:.*]] = load i64, ptr @__msan_param_tls
; CHECK-DAG: %[[SB:.*]] = load i64, {{.*}}@__msan_param_tls to i64), i64 8)
; CHECK-DAG: %[[SB0:.*]] = icmp ne i64 %[[SB]], 0
; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i64
@ -55,7 +55,7 @@ entry:
}
; CHECK-LABEL: @Test_bextr_32(
; CHECK-DAG: %[[SA:.*]] = load i32, {{.*}}@__msan_param_tls to i32*)
; CHECK-DAG: %[[SA:.*]] = load i32, ptr @__msan_param_tls
; CHECK-DAG: %[[SB:.*]] = load i32, {{.*}}@__msan_param_tls to i64), i64 8)
; CHECK-DAG: %[[SB0:.*]] = icmp ne i32 %[[SB]], 0
; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i32
@ -71,7 +71,7 @@ entry:
}
; CHECK-LABEL: @Test_bextr_64(
; CHECK-DAG: %[[SA:.*]] = load i64, {{.*}}@__msan_param_tls, i32 0, i32 0
; CHECK-DAG: %[[SA:.*]] = load i64, ptr @__msan_param_tls
; CHECK-DAG: %[[SB:.*]] = load i64, {{.*}}@__msan_param_tls to i64), i64 8)
; CHECK-DAG: %[[SB0:.*]] = icmp ne i64 %[[SB]], 0
; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i64
@ -88,7 +88,7 @@ entry:
}
; CHECK-LABEL: @Test_pdep_32(
; CHECK-DAG: %[[SA:.*]] = load i32, {{.*}}@__msan_param_tls to i32*)
; CHECK-DAG: %[[SA:.*]] = load i32, ptr @__msan_param_tls
; CHECK-DAG: %[[SB:.*]] = load i32, {{.*}}@__msan_param_tls to i64), i64 8)
; CHECK-DAG: %[[SB0:.*]] = icmp ne i32 %[[SB]], 0
; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i32
@ -104,7 +104,7 @@ entry:
}
; CHECK-LABEL: @Test_pdep_64(
; CHECK-DAG: %[[SA:.*]] = load i64, {{.*}}@__msan_param_tls, i32 0, i32 0
; CHECK-DAG: %[[SA:.*]] = load i64, ptr @__msan_param_tls
; CHECK-DAG: %[[SB:.*]] = load i64, {{.*}}@__msan_param_tls to i64), i64 8)
; CHECK-DAG: %[[SB0:.*]] = icmp ne i64 %[[SB]], 0
; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i64
@ -120,7 +120,7 @@ entry:
}
; CHECK-LABEL: @Test_pext_32(
; CHECK-DAG: %[[SA:.*]] = load i32, {{.*}}@__msan_param_tls to i32*)
; CHECK-DAG: %[[SA:.*]] = load i32, ptr @__msan_param_tls
; CHECK-DAG: %[[SB:.*]] = load i32, {{.*}}@__msan_param_tls to i64), i64 8)
; CHECK-DAG: %[[SB0:.*]] = icmp ne i32 %[[SB]], 0
; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i32
@ -136,7 +136,7 @@ entry:
}
; CHECK-LABEL: @Test_pext_64(
; CHECK-DAG: %[[SA:.*]] = load i64, {{.*}}@__msan_param_tls, i32 0, i32 0
; CHECK-DAG: %[[SA:.*]] = load i64, ptr @__msan_param_tls
; CHECK-DAG: %[[SB:.*]] = load i64, {{.*}}@__msan_param_tls to i64), i64 8)
; CHECK-DAG: %[[SB0:.*]] = icmp ne i64 %[[SB]], 0
; CHECK-DAG: %[[SB1:.*]] = sext i1 %[[SB0]] to i64


@ -16,12 +16,12 @@ entry:
}
; CHECK-LABEL: @clmul00
; CHECK: %[[S0:.*]] = load <2 x i64>, <2 x i64>* {{.*}}@__msan_param_tls
; CHECK: %[[S1:.*]] = load <2 x i64>, <2 x i64>* {{.*}}@__msan_param_tls
; CHECK: %[[S0:.*]] = load <2 x i64>, ptr {{.*}}@__msan_param_tls
; CHECK: %[[S1:.*]] = load <2 x i64>, ptr {{.*}}@__msan_param_tls
; CHECK: %[[SHUF0:.*]] = shufflevector <2 x i64> %[[S0]], <2 x i64> poison, <2 x i32> zeroinitializer
; CHECK: %[[SHUF1:.*]] = shufflevector <2 x i64> %[[S1]], <2 x i64> poison, <2 x i32> zeroinitializer
; CHECK: %[[SRET:.*]] = or <2 x i64> %[[SHUF0]], %[[SHUF1]]
; CHECK: store <2 x i64> %[[SRET]], <2 x i64>* {{.*}}@__msan_retval_tls
; CHECK: store <2 x i64> %[[SRET]], ptr {{.*}}@__msan_retval_tls
define <2 x i64> @clmul10(<2 x i64> %a, <2 x i64> %b) sanitize_memory {
entry:
@ -30,12 +30,12 @@ entry:
}
; CHECK-LABEL: @clmul10
; CHECK: %[[S0:.*]] = load <2 x i64>, <2 x i64>* {{.*}}@__msan_param_tls
; CHECK: %[[S1:.*]] = load <2 x i64>, <2 x i64>* {{.*}}@__msan_param_tls
; CHECK: %[[S0:.*]] = load <2 x i64>, ptr {{.*}}@__msan_param_tls
; CHECK: %[[S1:.*]] = load <2 x i64>, ptr {{.*}}@__msan_param_tls
; CHECK: %[[SHUF0:.*]] = shufflevector <2 x i64> %[[S0]], <2 x i64> poison, <2 x i32> zeroinitializer
; CHECK: %[[SHUF1:.*]] = shufflevector <2 x i64> %[[S1]], <2 x i64> poison, <2 x i32> <i32 1, i32 1>
; CHECK: %[[SRET:.*]] = or <2 x i64> %[[SHUF0]], %[[SHUF1]]
; CHECK: store <2 x i64> %[[SRET]], <2 x i64>* {{.*}}@__msan_retval_tls
; CHECK: store <2 x i64> %[[SRET]], ptr {{.*}}@__msan_retval_tls
define <4 x i64> @clmul11_256(<4 x i64> %a, <4 x i64> %b) sanitize_memory {
entry:
@ -44,12 +44,12 @@ entry:
}
; CHECK-LABEL: @clmul11_256
; CHECK: %[[S0:.*]] = load <4 x i64>, <4 x i64>* {{.*}}@__msan_param_tls
; CHECK: %[[S1:.*]] = load <4 x i64>, <4 x i64>* {{.*}}@__msan_param_tls
; CHECK: %[[S0:.*]] = load <4 x i64>, ptr {{.*}}@__msan_param_tls
; CHECK: %[[S1:.*]] = load <4 x i64>, ptr {{.*}}@__msan_param_tls
; CHECK: %[[SHUF0:.*]] = shufflevector <4 x i64> %[[S0]], <4 x i64> poison, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
; CHECK: %[[SHUF1:.*]] = shufflevector <4 x i64> %[[S1]], <4 x i64> poison, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
; CHECK: %[[SRET:.*]] = or <4 x i64> %[[SHUF0]], %[[SHUF1]]
; CHECK: store <4 x i64> %[[SRET]], <4 x i64>* {{.*}}@__msan_retval_tls
; CHECK: store <4 x i64> %[[SRET]], ptr {{.*}}@__msan_retval_tls
define <8 x i64> @clmul01_512(<8 x i64> %a, <8 x i64> %b) sanitize_memory {
entry:
@ -58,13 +58,13 @@ entry:
}
; CHECK-LABEL: @clmul01_512
; CHECK: %[[S0:.*]] = load <8 x i64>, <8 x i64>* {{.*}}@__msan_param_tls
; CHECK: %[[S1:.*]] = load <8 x i64>, <8 x i64>* {{.*}}@__msan_param_tls
; CHECK: %[[S0:.*]] = load <8 x i64>, ptr {{.*}}@__msan_param_tls
; CHECK: %[[S1:.*]] = load <8 x i64>, ptr {{.*}}@__msan_param_tls
; CHECK: %[[SHUF0:.*]] = shufflevector <8 x i64> %[[S0]], <8 x i64> poison, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
; CHECK: %[[SHUF1:.*]] = shufflevector <8 x i64> %[[S1]], <8 x i64> poison, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
; CHECK: %[[SRET:.*]] = or <8 x i64> %[[SHUF0]], %[[SHUF1]]
; ORIGIN: %[[FLAT:.*]] = bitcast <8 x i64> %[[SHUF1]] to i512
; ORIGIN: %[[I:.*]] = icmp ne i512 %[[FLAT]], 0
; ORIGIN: %[[O:.*]] = select i1 %[[I]],
; CHECK: store <8 x i64> %[[SRET]], <8 x i64>* {{.*}}@__msan_retval_tls
; CHECK: store <8 x i64> %[[SRET]], ptr {{.*}}@__msan_retval_tls
; ORIGIN: store i32 %[[O]], i32* @__msan_retval_origin_tls


@ -6,18 +6,18 @@ target triple = "x86_64-unknown-linux-gnu"
define void @var_funnel_i64(i64 %a64, <2 x i64> %a128, <4 x i64> %a256, <8 x i64> %a512, i64 %b64, <2 x i64> %b128, <4 x i64> %b256, <8 x i64> %b512, i64 %c64, <2 x i64> %c128, <4 x i64> %c256, <8 x i64> %c512) sanitize_memory {
; CHECK-LABEL: @var_funnel_i64(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 120) to i64*), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 240) to i64*), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <2 x i64>*), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 128) to <2 x i64>*), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 248) to <2 x i64>*), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <4 x i64>*), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 144) to <4 x i64>*), align 8
; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 264) to <4 x i64>*), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i64>, <8 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <8 x i64>*), align 8
; CHECK-NEXT: [[TMP11:%.*]] = load <8 x i64>, <8 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <8 x i64>*), align 8
; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i64>, <8 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 296) to <8 x i64>*), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
; CHECK-NEXT: [[TMP11:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
; CHECK-NEXT: [[TMP12:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i64 [[TMP3]], 0
; CHECK-NEXT: [[TMP14:%.*]] = sext i1 [[TMP13]] to i64
@ -50,18 +50,18 @@ define void @var_funnel_i64(i64 %a64, <2 x i64> %a128, <4 x i64> %a256, <8 x i64
define void @var_funnel_i32(i32 %a32, <4 x i32> %a128, <8 x i32> %a256, <16 x i32> %a512, i32 %b32, <4 x i32> %b128, <8 x i32> %b256, <16 x i32> %b512, i32 %c32, <4 x i32> %c128, <8 x i32> %c256, <16 x i32> %c512) sanitize_memory {
; CHECK-LABEL: @var_funnel_i32(
; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* bitcast ([100 x i64]* @__msan_param_tls to i32*), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 120) to i32*), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 240) to i32*), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <4 x i32>*), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 128) to <4 x i32>*), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 248) to <4 x i32>*), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <8 x i32>*), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 144) to <8 x i32>*), align 8
; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 264) to <8 x i32>*), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <16 x i32>, <16 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <16 x i32>*), align 8
; CHECK-NEXT: [[TMP11:%.*]] = load <16 x i32>, <16 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <16 x i32>*), align 8
; CHECK-NEXT: [[TMP12:%.*]] = load <16 x i32>, <16 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 296) to <16 x i32>*), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
; CHECK-NEXT: [[TMP9:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
; CHECK-NEXT: [[TMP11:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
; CHECK-NEXT: [[TMP12:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP3]], 0
; CHECK-NEXT: [[TMP14:%.*]] = sext i1 [[TMP13]] to i32
@ -94,18 +94,18 @@ define void @var_funnel_i32(i32 %a32, <4 x i32> %a128, <8 x i32> %a256, <16 x i3
define void @var_funnel_i16(i16 %a16, <8 x i16> %a128, <16 x i16> %a256, <32 x i16> %a512, i16 %b16, <8 x i16> %b128, <16 x i16> %b256, <32 x i16> %b512, i16 %c16, <8 x i16> %c128, <16 x i16> %c256, <32 x i16> %c512) sanitize_memory {
; CHECK-LABEL: @var_funnel_i16(
; CHECK-NEXT: [[TMP1:%.*]] = load i16, i16* bitcast ([100 x i64]* @__msan_param_tls to i16*), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 120) to i16*), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 240) to i16*), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <8 x i16>*), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 128) to <8 x i16>*), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, <8 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 248) to <8 x i16>*), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i16>, <16 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <16 x i16>*), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i16>, <16 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 144) to <16 x i16>*), align 8
; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i16>, <16 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 264) to <16 x i16>*), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <32 x i16>, <32 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <32 x i16>*), align 8
; CHECK-NEXT: [[TMP11:%.*]] = load <32 x i16>, <32 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <32 x i16>*), align 8
; CHECK-NEXT: [[TMP12:%.*]] = load <32 x i16>, <32 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 296) to <32 x i16>*), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
; CHECK-NEXT: [[TMP9:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
; CHECK-NEXT: [[TMP11:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
; CHECK-NEXT: [[TMP12:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i16 [[TMP3]], 0
; CHECK-NEXT: [[TMP14:%.*]] = sext i1 [[TMP13]] to i16
@ -138,18 +138,18 @@ define void @var_funnel_i16(i16 %a16, <8 x i16> %a128, <16 x i16> %a256, <32 x i
define void @var_funnel_i8(i8 %a8, <16 x i8> %a128, <32 x i8> %a256, <64 x i8> %a512, i8 %b8, <16 x i8> %b128, <32 x i8> %b256, <64 x i8> %b512, i8 %c8, <16 x i8> %c128, <32 x i8> %c256, <64 x i8> %c512) sanitize_memory {
; CHECK-LABEL: @var_funnel_i8(
; CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* bitcast ([100 x i64]* @__msan_param_tls to i8*), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i8, i8* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 120) to i8*), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i8, i8* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 240) to i8*), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <16 x i8>*), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 128) to <16 x i8>*), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i8>, <16 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 248) to <16 x i8>*), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <32 x i8>, <32 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <32 x i8>*), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <32 x i8>, <32 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 144) to <32 x i8>*), align 8
; CHECK-NEXT: [[TMP9:%.*]] = load <32 x i8>, <32 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 264) to <32 x i8>*), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <64 x i8>, <64 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <64 x i8>*), align 8
; CHECK-NEXT: [[TMP11:%.*]] = load <64 x i8>, <64 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <64 x i8>*), align 8
; CHECK-NEXT: [[TMP12:%.*]] = load <64 x i8>, <64 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 296) to <64 x i8>*), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 240) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 248) to ptr), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
; CHECK-NEXT: [[TMP9:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 264) to ptr), align 8
; CHECK-NEXT: [[TMP10:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
; CHECK-NEXT: [[TMP11:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
; CHECK-NEXT: [[TMP12:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 296) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP13:%.*]] = icmp ne i8 [[TMP3]], 0
; CHECK-NEXT: [[TMP14:%.*]] = sext i1 [[TMP13]] to i8
@ -182,14 +182,14 @@ define void @var_funnel_i8(i8 %a8, <16 x i8> %a128, <32 x i8> %a256, <64 x i8> %
define void @var_rotate_i64(i64 %a64, <2 x i64> %a128, <4 x i64> %a256, <8 x i64> %a512, i64 %c64, <2 x i64> %c128, <4 x i64> %c256, <8 x i64> %c512) sanitize_memory {
; CHECK-LABEL: @var_rotate_i64(
; CHECK-NEXT: [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 120) to i64*), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <2 x i64>*), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, <2 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 128) to <2 x i64>*), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <4 x i64>*), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 144) to <4 x i64>*), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, <8 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <8 x i64>*), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i64>, <8 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <8 x i64>*), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <8 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i64 [[TMP2]], 0
; CHECK-NEXT: [[TMP10:%.*]] = sext i1 [[TMP9]] to i64
@ -222,14 +222,14 @@ define void @var_rotate_i64(i64 %a64, <2 x i64> %a128, <4 x i64> %a256, <8 x i64
define void @var_rotate_i32(i32 %a32, <4 x i32> %a128, <8 x i32> %a256, <16 x i32> %a512, i32 %c32, <4 x i32> %c128, <8 x i32> %c256, <16 x i32> %c512) sanitize_memory {
; CHECK-LABEL: @var_rotate_i32(
; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* bitcast ([100 x i64]* @__msan_param_tls to i32*), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 120) to i32*), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <4 x i32>*), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 128) to <4 x i32>*), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <8 x i32>*), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, <8 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 144) to <8 x i32>*), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, <16 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <16 x i32>*), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i32>, <16 x i32>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <16 x i32>*), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP2]], 0
; CHECK-NEXT: [[TMP10:%.*]] = sext i1 [[TMP9]] to i32
@ -262,14 +262,14 @@ define void @var_rotate_i32(i32 %a32, <4 x i32> %a128, <8 x i32> %a256, <16 x i3
define void @var_rotate_i16(i16 %a16, <8 x i16> %a128, <16 x i16> %a256, <32 x i16> %a512, i16 %c16, <8 x i16> %c128, <16 x i16> %c256, <32 x i16> %c512) sanitize_memory {
; CHECK-LABEL: @var_rotate_i16(
; CHECK-NEXT: [[TMP1:%.*]] = load i16, i16* bitcast ([100 x i64]* @__msan_param_tls to i16*), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 120) to i16*), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, <8 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <8 x i16>*), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, <8 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 128) to <8 x i16>*), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i16>, <16 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <16 x i16>*), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i16>, <16 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 144) to <16 x i16>*), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <32 x i16>, <32 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <32 x i16>*), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <32 x i16>, <32 x i16>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <32 x i16>*), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <16 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <32 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i16 [[TMP2]], 0
; CHECK-NEXT: [[TMP10:%.*]] = sext i1 [[TMP9]] to i16
@ -302,14 +302,14 @@ define void @var_rotate_i16(i16 %a16, <8 x i16> %a128, <16 x i16> %a256, <32 x i
define void @var_rotate_i8(i8 %a8, <16 x i8> %a128, <32 x i8> %a256, <64 x i8> %a512, i8 %c8, <16 x i8> %c128, <32 x i8> %c256, <64 x i8> %c512) sanitize_memory {
; CHECK-LABEL: @var_rotate_i8(
; CHECK-NEXT: [[TMP1:%.*]] = load i8, i8* bitcast ([100 x i64]* @__msan_param_tls to i8*), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i8, i8* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 120) to i8*), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <16 x i8>*), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 128) to <16 x i8>*), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <32 x i8>, <32 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 24) to <32 x i8>*), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <32 x i8>, <32 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 144) to <32 x i8>*), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <64 x i8>, <64 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 56) to <64 x i8>*), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <64 x i8>, <64 x i8>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 176) to <64 x i8>*), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 120) to ptr), align 8
; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
; CHECK-NEXT: [[TMP6:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 144) to ptr), align 8
; CHECK-NEXT: [[TMP7:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 56) to ptr), align 8
; CHECK-NEXT: [[TMP8:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 176) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i8 [[TMP2]], 0
; CHECK-NEXT: [[TMP10:%.*]] = sext i1 [[TMP9]] to i8

@ -15,8 +15,8 @@ entry:
}
; CHECK-LABEL: @Shuffle(
; CHECK: [[A:%.*]] = load i32, i32* {{.*}}@__msan_param_origin_tls,
; CHECK: store i32 [[A]], i32* @__msan_retval_origin_tls
; CHECK: [[A:%.*]] = load i32, ptr @__msan_param_origin_tls
; CHECK: store i32 [[A]], ptr @__msan_retval_origin_tls
; CHECK: ret <4 x i32>

@ -19,8 +19,8 @@ entry:
; CHECK-DAG: declare void @__sanitizer_cov_trace_div4(i32 zeroext)
; CHECK-DAG: declare void @__sanitizer_cov_trace_div8(i64)
; CHECK-DAG: declare void @__sanitizer_cov_trace_gep(i64)
; CHECK-DAG: declare void @__sanitizer_cov_trace_switch(i64, i64*)
; CHECK-DAG: declare void @__sanitizer_cov_trace_switch(i64, ptr)
; CHECK-DAG: declare void @__sanitizer_cov_trace_pc()
; CHECK-DAG: declare void @__sanitizer_cov_trace_pc_guard(i32*)
; CHECK-DAG: declare void @__sanitizer_cov_trace_pc_guard_init(i32*, i32*)
; CHECK-DAG: declare void @__sanitizer_cov_trace_pc_guard(ptr)
; CHECK-DAG: declare void @__sanitizer_cov_trace_pc_guard_init(ptr, ptr)
; CHECK-NOT: declare

@ -15,8 +15,8 @@ entry:
; CHECK-DAG: declare void @__sanitizer_cov_trace_div4(i32 zeroext)
; CHECK-DAG: declare void @__sanitizer_cov_trace_div8(i64)
; CHECK-DAG: declare void @__sanitizer_cov_trace_gep(i64)
; CHECK-DAG: declare void @__sanitizer_cov_trace_switch(i64, i64*)
; CHECK-DAG: declare void @__sanitizer_cov_trace_switch(i64, ptr)
; CHECK-DAG: declare void @__sanitizer_cov_trace_pc()
; CHECK-DAG: declare void @__sanitizer_cov_trace_pc_guard(i32*)
; CHECK-DAG: declare void @__sanitizer_cov_trace_pc_guard_init(i32*, i32*)
; CHECK-DAG: declare void @__sanitizer_cov_trace_pc_guard(ptr)
; CHECK-DAG: declare void @__sanitizer_cov_trace_pc_guard_init(ptr, ptr)
; CHECK-NOT: declare

@ -26,16 +26,16 @@
; CHECK: @__sancov_gen_{{.*}} = private global [1 x i8] zeroinitializer, section ".SCOV$CM", comdat($foo), align 1
; CHECK: @__sancov_gen_{{.*}} = private constant [2 x i64*]
; CHECK-SAME: [i64* bitcast (i32 (i32)* @foo to i64*), i64* inttoptr (i64 1 to i64*)],
; CHECK: @__sancov_gen_{{.*}} = private constant [2 x ptr]
; CHECK-SAME: [ptr @foo, ptr inttoptr (i64 1 to ptr)],
; CHECK-SAME: section ".SCOVP$M", comdat($foo), align 8
; Tables for 'bar' should be in the 'bar' comdat.
; CHECK: @__sancov_gen_{{.*}} = private global [1 x i8] zeroinitializer, section ".SCOV$CM", comdat($bar), align 1
; CHECK: @__sancov_gen_{{.*}} = private constant [2 x i64*]
; CHECK-SAME: [i64* bitcast (i32 (i32)* @bar to i64*), i64* inttoptr (i64 1 to i64*)],
; CHECK: @__sancov_gen_{{.*}} = private constant [2 x ptr]
; CHECK-SAME: [ptr @bar, ptr inttoptr (i64 1 to ptr)],
; CHECK-SAME: section ".SCOVP$M", comdat($bar), align 8
; 'foo' and 'bar' should be in their new comdat groups.

@ -9,9 +9,9 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
target triple = "x86_64-unknown-linux-gnu"
define void @foo() {
entry:
; CHECK: %0 = load i8, i8* getelementptr inbounds ([1 x i8], [1 x i8]* @__sancov_gen_, i64 0, i64 0), align 1, !nosanitize
; CHECK: %0 = load i8, ptr @__sancov_gen_, align 1, !nosanitize
; CHECK: %1 = add i8 %0, 1
; CHECK: store i8 %1, i8* getelementptr inbounds ([1 x i8], [1 x i8]* @__sancov_gen_, i64 0, i64 0), align 1, !nosanitize
; CHECK: store i8 %1, ptr @__sancov_gen_, align 1, !nosanitize
ret void
}
; CHECK: call void @__sanitizer_cov_8bit_counters_init(i8* @__start___sancov_cntrs, i8* @__stop___sancov_cntrs)
; CHECK: call void @__sanitizer_cov_8bit_counters_init(ptr @__start___sancov_cntrs, ptr @__stop___sancov_cntrs)

@ -5,19 +5,19 @@
; CHECK: @__sancov_gen_ = private global [1 x i1] zeroinitializer, section "__sancov_bools", comdat($foo), align 1{{$}}
; CHECK: @__start___sancov_bools = extern_weak hidden global i1
; CHECK-NEXT: @__stop___sancov_bools = extern_weak hidden global i1
; CHECK: @llvm.used = appending global [1 x i8*] [i8* bitcast (void ()* @sancov.module_ctor_bool_flag to i8*)], section "llvm.metadata"
; CHECK: @llvm.compiler.used = appending global [1 x i8*] [i8* bitcast ([1 x i1]* @__sancov_gen_ to i8*)], section "llvm.metadata"
; CHECK: @llvm.used = appending global [1 x ptr] [ptr @sancov.module_ctor_bool_flag], section "llvm.metadata"
; CHECK: @llvm.compiler.used = appending global [1 x ptr] [ptr @__sancov_gen_], section "llvm.metadata"
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"
define void @foo() {
; CHECK-LABEL: @foo(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load i1, i1* getelementptr inbounds ([1 x i1], [1 x i1]* @__sancov_gen_, i64 0, i64 0), align 1, !nosanitize ![[#EMPTY:]]
; CHECK-NEXT: [[TMP0:%.*]] = load i1, ptr @__sancov_gen_, align 1, !nosanitize ![[#EMPTY:]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i1 [[TMP0]], false
; CHECK-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP3:%.*]]
; CHECK: 2:
; CHECK-NEXT: store i1 true, i1* getelementptr inbounds ([1 x i1], [1 x i1]* @__sancov_gen_, i64 0, i64 0), align 1, !nosanitize ![[#EMPTY:]]
; CHECK-NEXT: store i1 true, ptr @__sancov_gen_, align 1, !nosanitize ![[#EMPTY:]]
; CHECK-NEXT: br label [[TMP3]]
; CHECK: 3:
; CHECK-NEXT: ret void
@ -25,6 +25,6 @@ define void @foo() {
entry:
ret void
}
; CHECK: call void @__sanitizer_cov_bool_flag_init(i1* @__start___sancov_bools, i1* @__stop___sancov_bools)
; CHECK: call void @__sanitizer_cov_bool_flag_init(ptr @__start___sancov_bools, ptr @__stop___sancov_bools)
; CHECK: ![[#EMPTY]] = !{}

@ -8,7 +8,7 @@ define void @foo(i32 %x) {
entry:
; CHECK: __sancov_gen_cov_switch_values = internal global [5 x i64] [i64 3, i64 32, i64 1, i64 101, i64 1001]
; CHECK: [[TMP:%[0-9]*]] = zext i32 %x to i64
; CHECK-NEXT: call void @__sanitizer_cov_trace_switch(i64 [[TMP]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @__sancov_gen_cov_switch_values, i32 0, i32 0))
; CHECK-NEXT: call void @__sanitizer_cov_trace_switch(i64 [[TMP]], ptr @__sancov_gen_cov_switch_values)
switch i32 %x, label %sw.epilog [
i32 1, label %sw.bb
i32 1001, label %sw.bb.1

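For illustration only, a minimal, self-contained sketch of the instrumented pattern the test above checks for, written directly in opaque-pointer form (the names @demo and @my_switch_values are hypothetical, not from this commit). As the CHECK line suggests, the table packs the case count, the bit width of the condition, and the sorted case values, and it is now passed to the callback as a plain ptr rather than a typed getelementptr constant:

@my_switch_values = internal global [4 x i64] [i64 2, i64 8, i64 4, i64 9]

declare void @__sanitizer_cov_trace_switch(i64, ptr)

define void @demo(i8 %c) {
entry:
  ; Widen the switch condition and report it together with the value table.
  %cond = zext i8 %c to i64
  call void @__sanitizer_cov_trace_switch(i64 %cond, ptr @my_switch_values)
  switch i8 %c, label %done [
    i8 4, label %done
    i8 9, label %done
  ]
done:
  ret void
}
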
@ -2,7 +2,7 @@
; RUN: llvm-as %s -o %t.2.bc
; RUN: llvm-link %t.1.bc %t.2.bc -S | FileCheck %s
; CHECK: @bar = global i32 ()* @foo.2
; CHECK: @bar = global ptr @foo.2
; CHECK: define internal i32 @foo.2() {
; CHECK-NEXT: ret i32 7

@ -3,7 +3,7 @@
; RUN: llvm-as %p/2008-07-06-AliasFnDecl2.ll -o %t2.bc
; RUN: llvm-link %t1.bc %t2.bc -o %t3.bc
@b = alias void (), void ()* @a
@b = alias void (), ptr @a
define void @a() nounwind {
entry:

@ -7,9 +7,9 @@
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
target triple = "i386-pc-linux-gnu"
@sched_clock = alias i64 (), i64 ()* @native_sched_clock
@sched_clock = alias i64 (), ptr @native_sched_clock
@foo = alias i32, i32* @realfoo
@foo = alias i32, ptr @realfoo
@realfoo = global i32 0
define i64 @native_sched_clock() nounwind {

@ -6,11 +6,11 @@
; RUN: llvm-link %t.1.bc %t.2.bc -S | FileCheck %s
; CHECK: [i32 7, i32 4, i32 8]
@X = appending global [2 x i32] [ i32 7, i32 4 ] ; <[2 x i32]*> [#uses=2]
@Y = global i32* getelementptr ([2 x i32], [2 x i32]* @X, i64 0, i64 0) ; <i32**> [#uses=0]
@X = appending global [2 x i32] [ i32 7, i32 4 ]
@Y = global ptr @X
define void @foo(i64 %V) {
%Y = getelementptr [2 x i32], [2 x i32]* @X, i64 0, i64 %V ; <i32*> [#uses=0]
%Y = getelementptr [2 x i32], ptr @X, i64 0, i64 %V
ret void
}

@ -1,4 +1,4 @@
@bar = global i32()* @foo
@bar = global ptr @foo
define internal i32 @foo() {
ret i32 7
}

@ -3,7 +3,7 @@
declare i32 @foo()
define void @bar() !dbg !4 {
load i32, i32* @X, !dbg !10
load i32, ptr @X, !dbg !10
call i32 @foo(), !dbg !11
ret void, !dbg !12
}

@ -1,7 +1,7 @@
define i32 @main() #0 !dbg !4 {
entry:
%retval = alloca i32, align 4
store i32 0, i32* %retval, align 4
store i32 0, ptr %retval, align 4
ret i32 0, !dbg !11
}

@ -5,7 +5,7 @@
; erroneously renamed to A.1 and not linked to the declaration from
; the first module
@C = alias void (), void ()* @A
@C = alias void (), ptr @A
define void @D() {
call void @C()
@ -16,7 +16,7 @@ define void @A() {
ret void
}
; CHECK-DAG: @C = alias void (), void ()* @A
; CHECK-DAG: @C = alias void (), ptr @A
; CHECK-DAG: define void @B()
; CHECK-DAG: call void @A()
; CHECK-DAG: define void @D()

@ -4,6 +4,6 @@
; Verify that linking GlobalAliases preserves the thread_local attribute
; CHECK: @tlsvar1 = thread_local global i32 0, align 4
; CHECK: @tlsvar2 = hidden thread_local alias i32, i32* @tlsvar1
; CHECK: @tlsvar2 = hidden thread_local alias i32, ptr @tlsvar1
@tlsvar2 = external thread_local global i32, align 4

@ -33,22 +33,22 @@ define void @bar() comdat($foo) {
$c1 = comdat any
@v1 = weak_odr global i32 42, comdat($c1)
define weak_odr i32 @f1(i8*) comdat($c1) {
define weak_odr i32 @f1(ptr) comdat($c1) {
bb10:
br label %bb11
bb11:
ret i32 42
}
@r11 = global i32* @v1
@r12 = global i32 (i8*)* @f1
@r11 = global ptr @v1
@r12 = global ptr @f1
@a11 = alias i32, i32* @v1
@a12 = alias i16, bitcast (i32* @v1 to i16*)
@a11 = alias i32, ptr @v1
@a12 = alias i16, ptr @v1
@a13 = alias i32 (i8*), i32 (i8*)* @f1
@a14 = alias i16, bitcast (i32 (i8*)* @f1 to i16*)
@a15 = alias i16, i16* @a14
@a13 = alias i32 (ptr), ptr @f1
@a14 = alias i16, ptr @f1
@a15 = alias i16, ptr @a14
; CHECK2: $c1 = comdat any
; CHECK2: $c2 = comdat any
@ -57,36 +57,36 @@ bb11:
; CHECK2-DAG: @v1 = weak_odr global i32 42, comdat($c1)
; CHECK2-DAG: @r11 = global i32* @v1{{$}}
; CHECK2-DAG: @r12 = global i32 (i8*)* @f1{{$}}
; CHECK2-DAG: @r11 = global ptr @v1{{$}}
; CHECK2-DAG: @r12 = global ptr @f1{{$}}
; CHECK2-DAG: @r21 = global i32* @v1{{$}}
; CHECK2-DAG: @r22 = global i32 (i8*)* @f1{{$}}
; CHECK2-DAG: @r21 = global ptr @v1{{$}}
; CHECK2-DAG: @r22 = global ptr @f1{{$}}
; CHECK2-DAG: @v1.1 = internal global i32 41, comdat($c2)
; CHECK2-DAG: @a11 = alias i32, i32* @v1{{$}}
; CHECK2-DAG: @a12 = alias i16, bitcast (i32* @v1 to i16*)
; CHECK2-DAG: @a11 = alias i32, ptr @v1{{$}}
; CHECK2-DAG: @a12 = alias i16, ptr @v1
; CHECK2-DAG: @a13 = alias i32 (i8*), i32 (i8*)* @f1{{$}}
; CHECK2-DAG: @a14 = alias i16, bitcast (i32 (i8*)* @f1 to i16*)
; CHECK2-DAG: @a13 = alias i32 (ptr), ptr @f1{{$}}
; CHECK2-DAG: @a14 = alias i16, ptr @f1
; CHECK2-DAG: @a21 = alias i32, i32* @v1.1{{$}}
; CHECK2-DAG: @a22 = alias i16, bitcast (i32* @v1.1 to i16*)
; CHECK2-DAG: @a21 = alias i32, ptr @v1.1{{$}}
; CHECK2-DAG: @a22 = alias i16, ptr @v1.1
; CHECK2-DAG: @a23 = alias i32 (i8*), i32 (i8*)* @f1.2{{$}}
; CHECK2-DAG: @a24 = alias i16, bitcast (i32 (i8*)* @f1.2 to i16*)
; CHECK2-DAG: @a23 = alias i32 (ptr), ptr @f1.2{{$}}
; CHECK2-DAG: @a24 = alias i16, ptr @f1.2
; CHECK2: define weak_odr protected i32 @f1(i8* %0) comdat($c1) {
; CHECK2: define weak_odr protected i32 @f1(ptr %0) comdat($c1) {
; CHECK2-NEXT: bb10:
; CHECK2-NEXT: br label %bb11{{$}}
; CHECK2: bb11:
; CHECK2-NEXT: ret i32 42
; CHECK2-NEXT: }
; CHECK2: define internal i32 @f1.2(i8* %this) comdat($c2) {
; CHECK2: define internal i32 @f1.2(ptr %this) comdat($c2) {
; CHECK2-NEXT: bb20:
; CHECK2-NEXT: store i8* %this, i8** null
; CHECK2-NEXT: store ptr %this, ptr null
; CHECK2-NEXT: br label %bb21
; CHECK2: bb21:
; CHECK2-NEXT: ret i32 41
@ -99,38 +99,38 @@ $c1 = comdat any
; This is only present in this file. The linker will keep $c1 from the first
; file and this will be undefined.
@will_be_undefined = global i32 1, comdat($c1)
@use = global i32* @will_be_undefined
@use = global ptr @will_be_undefined
@v1 = weak_odr global i32 41, comdat($c2)
define weak_odr protected i32 @f1(i8* %this) comdat($c2) {
define weak_odr protected i32 @f1(ptr %this) comdat($c2) {
bb20:
store i8* %this, i8** null
store ptr %this, ptr null
br label %bb21
bb21:
ret i32 41
}
@r21 = global i32* @v1
@r22 = global i32(i8*)* @f1
@r21 = global ptr @v1
@r22 = global ptr @f1
@a21 = alias i32, i32* @v1
@a22 = alias i16, bitcast (i32* @v1 to i16*)
@a21 = alias i32, ptr @v1
@a22 = alias i16, ptr @v1
@a23 = alias i32(i8*), i32(i8*)* @f1
@a24 = alias i16, bitcast (i32(i8*)* @f1 to i16*)
@a25 = alias i16, i16* @a24
@a23 = alias i32(ptr), ptr @f1
@a24 = alias i16, ptr @f1
@a25 = alias i16, ptr @a24
;--- 3.ll
; CHECK3: @bar = global i32 0, comdat($a1)
; CHECK3: @baz = private global i32 42, comdat($a1)
; CHECK3: @a1 = internal alias i32, i32* @baz
; CHECK3: @a1 = internal alias i32, ptr @baz
$a1 = comdat any
@bar = global i32 0, comdat($a1)
;--- 3-aux.ll
$a1 = comdat any
@baz = private global i32 42, comdat($a1)
@a1 = internal alias i32, i32* @baz
define i32* @abc() {
ret i32* @a1
@a1 = internal alias i32, ptr @baz
define ptr @abc() {
ret ptr @a1
}

@ -3,5 +3,5 @@
$foo = comdat any
@foo = global i8 0, comdat
; CHECK: @llvm.global_ctors = appending global [0 x { i32, void ()*, i8* }] zeroinitializer
; CHECK: @llvm.global_ctors = appending global [0 x { i32, ptr, ptr }] zeroinitializer
; CHECK: @foo = global i8 0, comdat

@ -4,5 +4,5 @@ $foo = comdat any
%t = type { i8 }
@foo = global %t zeroinitializer, comdat
; CHECK: @llvm.global_ctors = appending global [0 x { i32, void ()*, i8* }] zeroinitializer
; CHECK: @llvm.global_ctors = appending global [0 x { i32, ptr, ptr }] zeroinitializer
; CHECK: @foo = global %t zeroinitializer, comdat

@ -16,7 +16,7 @@
; EXPORTSTATIC-DAG: @staticvar.llvm.{{.*}} = hidden global
; Eventually @staticconstvar can be exported as a copy and not promoted
; EXPORTSTATIC-DAG: @staticconstvar.llvm.0 = hidden unnamed_addr constant
; EXPORTSTATIC-DAG: @P.llvm.{{.*}} = hidden global void ()* null
; EXPORTSTATIC-DAG: @P.llvm.{{.*}} = hidden global ptr null
; EXPORTSTATIC-DAG: define hidden i32 @staticfunc.llvm.
; EXPORTSTATIC-DAG: define hidden void @staticfunc2.llvm.
@ -72,7 +72,7 @@
; IMPORTSTATIC-DAG: @staticconstvar.llvm.{{.*}} = external hidden unnamed_addr constant
; IMPORTSTATIC-DAG: define available_externally i32 @referencestatics
; IMPORTSTATIC-DAG: %call = call i32 @staticfunc.llvm.
; IMPORTSTATIC-DAG: %0 = load i32, i32* @staticvar.llvm.
; IMPORTSTATIC-DAG: %0 = load i32, ptr @staticvar.llvm.
; IMPORTSTATIC-DAG: declare hidden i32 @staticfunc.llvm.
; Ensure that imported global (external) function and variable references
@ -90,9 +90,9 @@
; Ensure that imported static function pointer correctly promoted and renamed.
; RUN: llvm-link %t2.bc -summary-index=%t3.thinlto.bc -import=callfuncptr:%t.bc -S | FileCheck %s --check-prefix=IMPORTFUNCPTR
; IMPORTFUNCPTR-DAG: @P.llvm.{{.*}} = external hidden global void ()*
; IMPORTFUNCPTR-DAG: @P.llvm.{{.*}} = external hidden global ptr
; IMPORTFUNCPTR-DAG: define available_externally void @callfuncptr
; IMPORTFUNCPTR-DAG: %0 = load void ()*, void ()** @P.llvm.
; IMPORTFUNCPTR-DAG: %0 = load ptr, ptr @P.llvm.
; Ensure that imported weak function reference/definition handled properly.
; Imported weak_any definition should be skipped with warning, and imported
@ -107,11 +107,11 @@
@staticvar = internal global i32 1, align 4
@staticconstvar = internal unnamed_addr constant [2 x i32] [i32 10, i32 20], align 4
@commonvar = common global i32 0, align 4
@P = internal global void ()* null, align 8
@P = internal global ptr null, align 8
@weakalias = weak alias void (...), bitcast (void ()* @globalfunc1 to void (...)*)
@analias = alias void (...), bitcast (void ()* @globalfunc2 to void (...)*)
@linkoncealias = alias void (...), bitcast (void ()* @linkoncefunc to void (...)*)
@weakalias = weak alias void (...), ptr @globalfunc1
@analias = alias void (...), ptr @globalfunc2
@linkoncealias = alias void (...), ptr @linkoncefunc
define void @globalfunc1() #0 {
entry:
@ -131,14 +131,14 @@ entry:
define i32 @referencestatics(i32 %i) #0 {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
store i32 %i, ptr %i.addr, align 4
%call = call i32 @staticfunc()
%0 = load i32, i32* @staticvar, align 4
%0 = load i32, ptr @staticvar, align 4
%add = add nsw i32 %call, %0
%1 = load i32, i32* %i.addr, align 4
%1 = load i32, ptr %i.addr, align 4
%idxprom = sext i32 %1 to i64
%arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* @staticconstvar, i64 0, i64 %idxprom
%2 = load i32, i32* %arrayidx, align 4
%arrayidx = getelementptr inbounds [2 x i32], ptr @staticconstvar, i64 0, i64 %idxprom
%2 = load i32, ptr %arrayidx, align 4
%add1 = add nsw i32 %add, %2
ret i32 %add1
}
@ -146,29 +146,29 @@ entry:
define i32 @referenceglobals(i32 %i) #0 {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
store i32 %i, ptr %i.addr, align 4
call void @globalfunc1()
%0 = load i32, i32* @globalvar, align 4
%0 = load i32, ptr @globalvar, align 4
ret i32 %0
}
define i32 @referencecommon(i32 %i) #0 {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%0 = load i32, i32* @commonvar, align 4
store i32 %i, ptr %i.addr, align 4
%0 = load i32, ptr @commonvar, align 4
ret i32 %0
}
define void @setfuncptr() #0 {
entry:
store void ()* @staticfunc2, void ()** @P, align 8
store ptr @staticfunc2, ptr @P, align 8
ret void
}
define void @callfuncptr() #0 {
entry:
%0 = load void ()*, void ()** @P, align 8
%0 = load ptr, ptr @P, align 8
call void %0()
ret void
}

@ -3,23 +3,23 @@
;; Check that ifuncs are linked in properly.
; CHECK-DAG: @foo = ifunc void (), void ()* ()* @foo_resolve
; CHECK-DAG: define internal void ()* @foo_resolve() {
; CHECK-DAG: @foo = ifunc void (), ptr @foo_resolve
; CHECK-DAG: define internal ptr @foo_resolve() {
; CHECK-DAG: @bar = ifunc void (), void ()* ()* @bar_resolve
; CHECK-DAG: define internal void ()* @bar_resolve() {
; CHECK-DAG: @bar = ifunc void (), ptr @bar_resolve
; CHECK-DAG: define internal ptr @bar_resolve() {
;--- a.ll
declare void @bar()
;--- b.ll
@foo = ifunc void (), void ()* ()* @foo_resolve
@bar = ifunc void (), void ()* ()* @bar_resolve
@foo = ifunc void (), ptr @foo_resolve
@bar = ifunc void (), ptr @bar_resolve
define internal void ()* @foo_resolve() {
ret void ()* null
define internal ptr @foo_resolve() {
ret ptr null
}
define internal void ()* @bar_resolve() {
ret void ()* null
define internal ptr @bar_resolve() {
ret ptr null
}
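
As a reference for the shape being checked here, a single-module sketch of the same pattern under opaque pointers (hypothetical names @my_ifunc, @my_resolver, @my_impl; not part of this test): the ifunc names only its function type plus a ptr to its resolver, and the resolver simply returns a ptr.

@my_ifunc = ifunc void (), ptr @my_resolver

define internal ptr @my_resolver() {
entry:
  ; Pick the implementation at load time; here it is unconditional.
  ret ptr @my_impl
}

define void @my_impl() {
entry:
  ret void
}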

@ -8,10 +8,10 @@
@is_really_as1_gv_other_type = external global i32
; CHECK-LABEL: @foo(
; CHECK: %load0 = load volatile i32, i32* addrspacecast (i32 addrspace(1)* @is_really_as1_gv to i32*), align 4
; CHECK: %load1 = load volatile i32, i32* addrspacecast (i32 addrspace(1)* bitcast (float addrspace(1)* @is_really_as1_gv_other_type to i32 addrspace(1)*) to i32*), align 4
; CHECK: %load0 = load volatile i32, ptr addrspacecast (ptr addrspace(1) @is_really_as1_gv to ptr), align 4
; CHECK: %load1 = load volatile i32, ptr addrspacecast (ptr addrspace(1) @is_really_as1_gv_other_type to ptr), align 4
define void @foo() {
%load0 = load volatile i32, i32* @is_really_as1_gv, align 4
%load1 = load volatile i32, i32* @is_really_as1_gv_other_type, align 4
%load0 = load volatile i32, ptr @is_really_as1_gv, align 4
%load1 = load volatile i32, ptr @is_really_as1_gv_other_type, align 4
ret void
}

@ -5,6 +5,6 @@ target triple = "x86_64-unknown-linux-gnu"
@foo = external dso_local local_unnamed_addr constant i32, align 4
define dso_local i32 @_Z3barv() local_unnamed_addr {
entry:
%0 = load i32, i32* @foo, align 4
%0 = load i32, ptr @foo, align 4
ret i32 %0
}

@ -1,7 +1,7 @@
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
define i32 @main({ i64, { i64, i8* }* } %unnamed) #0 {
define i32 @main({ i64, ptr } %unnamed) #0 {
%1 = call i32 @_simplefunction() #1
ret i32 %1
}

@ -10,7 +10,7 @@
; RUN: | llvm-bcanalyzer -dump | FileCheck %s
; CHECK: <STRTAB_BLOCK
; CHECK-NEXT: blob data = 'mainglobalfunc1llvm.invariant.start.p0i8{{.*}}'
; CHECK-NEXT: blob data = 'mainglobalfunc1llvm.invariant.start.p0{{.*}}'
; Check that the summary is able to print the names despite the lack of
; string table in the legacy bitcode.

@ -139,7 +139,7 @@ target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16
target triple = "x86_64-apple-macosx10.11.0"
@llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @_GLOBAL__I_a, i8* null }]
@llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 65535, ptr @_GLOBAL__I_a, ptr null }]
declare void @baz()
@ -183,7 +183,7 @@ define available_externally void @live_available_externally_func() {
; alive.
; We want to make sure the @linkonceodrfuncwithalias copy in Input/deadstrip.ll
; is also scanned when computing reachability.
@linkonceodralias = linkonce_odr alias void (), void ()* @linkonceodrfuncwithalias
@linkonceodralias = linkonce_odr alias void (), ptr @linkonceodrfuncwithalias
define linkonce_odr void @linkonceodrfuncwithalias() {
entry:

@ -38,6 +38,6 @@ target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: nounwind uwtable
define i32 @main() local_unnamed_addr {
store i32 42, i32* @A, align 4
store i32 42, ptr @A, align 4
ret i32 0
}

@ -27,7 +27,7 @@ target triple = "x86_64-apple-macosx10.11.0"
define i32 @main() #0 {
entry:
call void (...) @foo()
%0 = load i32, i32* @baz, align 4
%0 = load i32, ptr @baz, align 4
ret i32 %0
}

@ -18,6 +18,6 @@ target triple = "x86_64-pc-linux-gnu"
@baz = external local_unnamed_addr constant i32, align 4
define i32 @main() local_unnamed_addr {
%1 = load i32, i32* @baz, align 4
%1 = load i32, ptr @baz, align 4
ret i32 %1
}

@ -61,64 +61,64 @@ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16
target triple = "x86_64-unknown-linux-gnu"
@a = dso_local global i32 42, align 4
@b = dso_local global i32* @a, align 8
@b = dso_local global ptr @a, align 8
define dso_local void @extern() {
call i32 @extern_aux(i32* @a, i32** @b)
call i32 @extern_aux(ptr @a, ptr @b)
ret void
}
define dso_local i32 @extern_aux(i32* %a, i32** %b) {
%p = load i32*, i32** %b, align 8
store i32 33, i32* %p, align 4
%v = load i32, i32* %a, align 4
define dso_local i32 @extern_aux(ptr %a, ptr %b) {
%p = load ptr, ptr %b, align 8
store i32 33, ptr %p, align 4
%v = load i32, ptr %a, align 4
ret i32 %v
}
define linkonce dso_local void @linkonce() {
call i32 @linkonce_aux(i32* @a, i32** @b)
call i32 @linkonce_aux(ptr @a, ptr @b)
ret void
}
define linkonce i32 @linkonce_aux(i32* %a, i32** %b) {
%p = load i32*, i32** %b, align 8
store i32 33, i32* %p, align 4
%v = load i32, i32* %a, align 4
define linkonce i32 @linkonce_aux(ptr %a, ptr %b) {
%p = load ptr, ptr %b, align 8
store i32 33, ptr %p, align 4
%v = load i32, ptr %a, align 4
ret i32 %v
}
define linkonce_odr dso_local void @linkonceodr() {
call i32 @linkonceodr_aux(i32* @a, i32** @b)
call i32 @linkonceodr_aux(ptr @a, ptr @b)
ret void
}
define linkonce_odr i32 @linkonceodr_aux(i32* %a, i32** %b) {
%p = load i32*, i32** %b, align 8
store i32 33, i32* %p, align 4
%v = load i32, i32* %a, align 4
define linkonce_odr i32 @linkonceodr_aux(ptr %a, ptr %b) {
%p = load ptr, ptr %b, align 8
store i32 33, ptr %p, align 4
%v = load i32, ptr %a, align 4
ret i32 %v
}
define weak dso_local void @weak() {
call i32 @weak_aux(i32* @a, i32** @b)
call i32 @weak_aux(ptr @a, ptr @b)
ret void
}
define weak i32 @weak_aux(i32* %a, i32** %b) {
%p = load i32*, i32** %b, align 8
store i32 33, i32* %p, align 4
%v = load i32, i32* %a, align 4
define weak i32 @weak_aux(ptr %a, ptr %b) {
%p = load ptr, ptr %b, align 8
store i32 33, ptr %p, align 4
%v = load i32, ptr %a, align 4
ret i32 %v
}
define weak_odr dso_local void @weakodr() {
call i32 @weakodr_aux(i32* @a, i32** @b)
call i32 @weakodr_aux(ptr @a, ptr @b)
ret void
}
define weak_odr i32 @weakodr_aux(i32* %a, i32** %b) {
%p = load i32*, i32** %b, align 8
store i32 33, i32* %p, align 4
%v = load i32, i32* %a, align 4
define weak_odr i32 @weakodr_aux(ptr %a, ptr %b) {
%p = load ptr, ptr %b, align 8
store i32 33, ptr %p, align 4
%v = load i32, ptr %a, align 4
ret i32 %v
}

@ -26,7 +26,7 @@ target triple = "x86_64-unknown-linux-gnu"
@foo = external dso_local local_unnamed_addr constant i32, align 4
define dso_local i32 @main() local_unnamed_addr {
entry:
%0 = load i32, i32* @foo, align 4
%0 = load i32, ptr @foo, align 4
%call = tail call i32 @_Z3barv()
%add = add nsw i32 %call, %0
ret i32 %add

@ -12,6 +12,6 @@ target triple = "x86_64-unknown-linux-gnu"
@g = external global i32
define i32 @main() {
%v = load i32, i32* @g
%v = load i32, ptr @g
ret i32 %v
}

@ -18,7 +18,7 @@ declare i32 @foo()
define i32 @main() {
%v = call i32 @foo()
%v2 = load i32, i32* @g
%v2 = load i32, ptr @g
%v3 = add i32 %v, %v2
ret i32 %v3
}

@ -13,9 +13,9 @@ target triple = "x86_64-unknown-linux-gnu"
@g = external global i32
define i32 @main() {
%v = load i32, i32* @g
%v = load i32, ptr @g
%q = add i32 %v, 1
store i32 %q, i32* @g
store i32 %q, ptr @g
ret i32 %v
}

@ -23,11 +23,11 @@ target triple = "x86_64-pc-linux-gnu"
declare void @foo()
define void @bar() personality i32 (i32, i32, i64, i8*, i8*)* @personality_routine {
define void @bar() personality ptr @personality_routine {
ret void
}
define internal i32 @personality_routine(i32, i32, i64, i8*, i8*) {
define internal i32 @personality_routine(i32, i32, i64, ptr, ptr) {
call void @foo()
ret i32 0
}

@ -8,8 +8,8 @@
; can make a local copy of someglobal and someglobal2 because they are both
; 'unnamed_addr' constants. This should eventually be done as well.
; RUN: llvm-lto -thinlto-action=import -import-constants-with-refs %t.bc -thinlto-index=%t3.bc -o - | llvm-dis -o - | FileCheck %s --check-prefix=IMPORT
; IMPORT: @someglobal.llvm.0 = available_externally hidden unnamed_addr constant i8* bitcast (void ()* @referencedbyglobal to i8*)
; IMPORT: @someglobal2.llvm.0 = available_externally hidden unnamed_addr constant i8* bitcast (void ()* @localreferencedbyglobal.llvm.0 to i8*)
; IMPORT: @someglobal.llvm.0 = available_externally hidden unnamed_addr constant ptr @referencedbyglobal
; IMPORT: @someglobal2.llvm.0 = available_externally hidden unnamed_addr constant ptr @localreferencedbyglobal.llvm.0
; IMPORT: define available_externally void @bar()
; Check the export side: we currently only export bar(), which causes

@ -22,7 +22,7 @@ target triple = "x86_64-unknown-linux-gnu"
@G = weak dso_local local_unnamed_addr global i32 0, align 4
define dso_local i32 @main() local_unnamed_addr {
%1 = load i32, i32* @G, align 4
%1 = load i32, ptr @G, align 4
ret i32 %1
}

@ -14,8 +14,8 @@
;
; The if-then-else blocks should be in just one function.
; CHECK: [[FOO_DIAMOND_LABEL]]:
; CHECK: call void [[FOO_DIAMOND:@[^(]*]](i32 %arg1, i32 %arg, i32* [[RES_VAL_ADDR]])
; CHECK-NEXT: [[RES_VAL:%[^ ]*]] = load i32, i32* [[RES_VAL_ADDR]]
; CHECK: call void [[FOO_DIAMOND:@[^(]*]](i32 %arg1, i32 %arg, ptr [[RES_VAL_ADDR]])
; CHECK-NEXT: [[RES_VAL:%[^ ]*]] = load i32, ptr [[RES_VAL_ADDR]]
; Then it should directly jump to end.
; CHECK: br label %[[FOO_END_LABEL:.*$]]
;
@ -61,7 +61,7 @@ ret1:
; CHECK: br i1 %or.cond, label %bb9, label %[[BAR_DIAMOND_LABEL:.*$]]
;
; CHECK: [[BAR_DIAMOND_LABEL]]:
; CHECK: [[CMP:%[^ ]*]] = call i1 [[BAR_DIAMOND:@[^(]*]](i32 %arg1, i32 %arg, i32*
; CHECK: [[CMP:%[^ ]*]] = call i1 [[BAR_DIAMOND:@[^(]*]](i32 %arg1, i32 %arg, ptr
; CHECK: br i1 [[CMP]], label %bb26, label %bb30
define i32 @bar(i32 %arg, i32 %arg1) {
bb:

@ -66,8 +66,8 @@ entry:
ret i32 %c3
}
; CHECK: define internal void @callee_writeonly.1.if.then(i32 %v, i32* %sub.out) [[FN_ATTRS0:#[0-9]+]]
; CHECK: define internal void @callee_most.2.if.then(i32 %v, i32* %sub.out) [[FN_ATTRS:#[0-9]+]]
; CHECK: define internal void @callee_writeonly.1.if.then(i32 %v, ptr %sub.out) [[FN_ATTRS0:#[0-9]+]]
; CHECK: define internal void @callee_most.2.if.then(i32 %v, ptr %sub.out) [[FN_ATTRS:#[0-9]+]]
; attributes to preserve
attributes #0 = {

@ -24,7 +24,7 @@ if.end: ; preds = %if.then, %entry
; CHECK-LABEL: @caller
; CHECK: codeRepl.i:
; CHECK-NOT: br label
; CHECK: call void @callee.2.if.then(i32 %v, i32* %mul.loc.i), !dbg ![[DBG2:[0-9]+]]
; CHECK: call void @callee.2.if.then(i32 %v, ptr %mul.loc.i), !dbg ![[DBG2:[0-9]+]]
define i32 @caller(i32 %v) !dbg !8 {
entry:
%call = call i32 @callee(i32 %v), !dbg !14
@ -55,7 +55,7 @@ if.end:
; CHECK-LABEL: @caller2
; CHECK: codeRepl.i:
; CHECK-NOT: br label
; CHECK: call void @callee2.1.if.then(i32 %v, i32* %sub.loc.i), !dbg ![[DBG4:[0-9]+]]
; CHECK: call void @callee2.1.if.then(i32 %v, ptr %sub.loc.i), !dbg ![[DBG4:[0-9]+]]
define i32 @caller2(i32 %v) !dbg !21 {
entry:
%call = call i32 @callee2(i32 %v), !dbg !22

@ -20,7 +20,7 @@ if.end: ; preds = %if.then, %entry
; CHECK-LABEL: @caller
; CHECK: codeRepl.i:
; CHECK-NOT: br label
; CHECK: call void (i32, i32*, ...) @callee.1.if.then(i32 %v, i32* %mul.loc.i, i32 99), !dbg ![[DBG2:[0-9]+]]
; CHECK: call void (i32, ptr, ...) @callee.1.if.then(i32 %v, ptr %mul.loc.i, i32 99), !dbg ![[DBG2:[0-9]+]]
define i32 @caller(i32 %v) !dbg !8 {
entry:
%call = call i32 (i32, ...) @callee(i32 %v, i32 99), !dbg !14

@ -5,11 +5,11 @@ target triple = "x86_64-apple-macosx10.11.0"
@staticvar = internal global i32 1, align 4
@staticconstvar = internal unnamed_addr constant [2 x i32] [i32 10, i32 20], align 4
@commonvar = common global i32 0, align 4
@P = internal global void ()* null, align 8
@P = internal global ptr null, align 8
@weakalias = weak alias void (...), bitcast (void ()* @globalfunc1 to void (...)*)
@analias = alias void (...), bitcast (void ()* @globalfunc2 to void (...)*)
@linkoncealias = alias void (...), bitcast (void ()* @linkoncefunc to void (...)*)
@weakalias = weak alias void (...), ptr @globalfunc1
@analias = alias void (...), ptr @globalfunc2
@linkoncealias = alias void (...), ptr @linkoncefunc
define void @globalfunc1() #0 {
entry:
@ -31,14 +31,14 @@ entry:
define i32 @referencestatics(i32 %i) #0 {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
store i32 %i, ptr %i.addr, align 4
%call = call i32 @staticfunc()
%0 = load i32, i32* @staticvar, align 4
%0 = load i32, ptr @staticvar, align 4
%add = add nsw i32 %call, %0
%1 = load i32, i32* %i.addr, align 4
%1 = load i32, ptr %i.addr, align 4
%idxprom = sext i32 %1 to i64
%arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* @staticconstvar, i64 0, i64 %idxprom
%2 = load i32, i32* %arrayidx, align 4
%arrayidx = getelementptr inbounds [2 x i32], ptr @staticconstvar, i64 0, i64 %idxprom
%2 = load i32, ptr %arrayidx, align 4
%add1 = add nsw i32 %add, %2
ret i32 %add1
}
@ -46,29 +46,29 @@ entry:
define i32 @referenceglobals(i32 %i) #0 {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
store i32 %i, ptr %i.addr, align 4
call void @globalfunc1()
%0 = load i32, i32* @globalvar, align 4
%0 = load i32, ptr @globalvar, align 4
ret i32 %0
}
define i32 @referencecommon(i32 %i) #0 {
entry:
%i.addr = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
%0 = load i32, i32* @commonvar, align 4
store i32 %i, ptr %i.addr, align 4
%0 = load i32, ptr @commonvar, align 4
ret i32 %0
}
define void @setfuncptr() #0 {
entry:
store void ()* @staticfunc2, void ()** @P, align 8
store ptr @staticfunc2, ptr @P, align 8
ret void
}
define void @callfuncptr() #0 {
entry:
%0 = load void ()*, void ()** @P, align 8
%0 = load ptr, ptr @P, align 8
call void %0()
ret void
}
@ -92,7 +92,7 @@ entry:
declare i32 @__gxx_personality_v0(...)
; Add enough instructions to prevent import with inst limit of 5
define internal void @funcwithpersonality() #2 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
define internal void @funcwithpersonality() #2 personality ptr @__gxx_personality_v0 {
entry:
call void @globalfunc2()
call void @globalfunc2()
@ -159,10 +159,9 @@ define void @variadic_no_va_start(...) {
; Variadic function with va_start should not be imported because the inliner
; doesn't handle it.
define void @variadic_va_start(...) {
%ap = alloca i8*, align 8
%ap.0 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap.0)
%ap = alloca ptr, align 8
call void @llvm.va_start(ptr %ap)
ret void
}
declare void @llvm.va_start(i8*) nounwind
declare void @llvm.va_start(ptr) nounwind

@ -91,9 +91,9 @@ declare void @callfuncptr(...) #1
; Ensure that all uses of local variable @P, which is used in setfuncptr
; and callfuncptr, are to the same promoted/renamed global.
; CHECK-DAG: @P.llvm.{{.*}} = available_externally hidden global void ()* null
; CHECK-DAG: %0 = load void ()*, void ()** @P.llvm.
; CHECK-DAG: store void ()* @staticfunc2.llvm.{{.*}}, void ()** @P.llvm.
; CHECK-DAG: @P.llvm.{{.*}} = available_externally hidden global ptr null
; CHECK-DAG: %0 = load ptr, ptr @P.llvm.
; CHECK-DAG: store ptr @staticfunc2.llvm.{{.*}}, ptr @P.llvm.
; Ensure that @referencelargelinkonce definition is pulled in, but later we
; also check that the linkonceodr function is not.
@ -110,7 +110,7 @@ declare void @weakfunc(...) #1
declare void @linkoncefunc2(...) #1
; INSTLIMDEF-DAG: Import funcwithpersonality
; INSTLIMDEF-DAG: define available_externally hidden void @funcwithpersonality.llvm.{{.*}}() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !thinlto_src_module !0 {
; INSTLIMDEF-DAG: define available_externally hidden void @funcwithpersonality.llvm.{{.*}}() personality ptr @__gxx_personality_v0 !thinlto_src_module !0 {
; INSTLIM5-DAG: declare hidden void @funcwithpersonality.llvm.{{.*}}()
; We can import variadic functions without a va_start, since the inliner

@ -14,7 +14,7 @@ entry:
ret i32 0
}
@analias = alias void (), void ()* @globalfunc
@analias = alias void (), ptr @globalfunc
define void @globalfunc() #0 {
entry:

@ -4,7 +4,7 @@
; CHECK-LABEL: void @empty()
; CHECK-NEXT: entry:
; CHECK-NEXT: %0 = atomicrmw add i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__llvm_gcov_ctr, i64 0, i64 0), i64 1 monotonic, align 8, !dbg [[DBG:![0-9]+]]
; CHECK-NEXT: %0 = atomicrmw add ptr @__llvm_gcov_ctr, i64 1 monotonic, align 8, !dbg [[DBG:![0-9]+]]
; CHECK-NEXT: ret void, !dbg [[DBG]]
define dso_local void @empty() !dbg !5 {

@ -22,43 +22,43 @@ target triple = "x86_64-apple-macosx10.10.0"
;
; GCDA: [[FILE_LOOP_HEADER]]:
; GCDA-NEXT: %[[IV:.*]] = phi i32 [ 0, %entry ], [ %[[NEXT_IV:.*]], %[[FILE_LOOP_LATCH:.*]] ]
; GCDA-NEXT: %[[FILE_INFO:.*]] = getelementptr inbounds {{.*}}, {{.*}}* @__llvm_internal_gcov_emit_file_info, i32 0, i32 %[[IV]]
; GCDA-NEXT: %[[START_FILE_ARGS:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[FILE_INFO]], i32 0, i32 0
; GCDA-NEXT: %[[START_FILE_ARG_0_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[START_FILE_ARGS]], i32 0, i32 0
; GCDA-NEXT: %[[START_FILE_ARG_0:.*]] = load i8*, i8** %[[START_FILE_ARG_0_PTR]]
; GCDA-NEXT: %[[START_FILE_ARG_1_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[START_FILE_ARGS]], i32 0, i32 1
; GCDA-NEXT: %[[START_FILE_ARG_1:.*]] = load i32, i32* %[[START_FILE_ARG_1_PTR]]
; GCDA-NEXT: %[[START_FILE_ARG_2_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[START_FILE_ARGS]], i32 0, i32 2
; GCDA-NEXT: %[[START_FILE_ARG_2:.*]] = load i32, i32* %[[START_FILE_ARG_2_PTR]]
; GCDA-NEXT: call void @llvm_gcda_start_file(i8* %[[START_FILE_ARG_0]], i32 %[[START_FILE_ARG_1]], i32 %[[START_FILE_ARG_2]])
; GCDA-NEXT: %[[NUM_COUNTERS_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[FILE_INFO]], i32 0, i32 1
; GCDA-NEXT: %[[NUM_COUNTERS:.*]] = load i32, i32* %[[NUM_COUNTERS_PTR]]
; GCDA-NEXT: %[[EMIT_FUN_ARGS_ARRAY_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[FILE_INFO]], i32 0, i32 2
; GCDA-NEXT: %[[EMIT_FUN_ARGS_ARRAY:.*]] = load {{.*}}*, {{.*}}** %[[EMIT_FUN_ARGS_ARRAY_PTR]]
; GCDA-NEXT: %[[EMIT_ARCS_ARGS_ARRAY_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[FILE_INFO]], i32 0, i32 3
; GCDA-NEXT: %[[EMIT_ARCS_ARGS_ARRAY:.*]] = load {{.*}}*, {{.*}}** %[[EMIT_ARCS_ARGS_ARRAY_PTR]]
; GCDA-NEXT: %[[FILE_INFO:.*]] = getelementptr inbounds {{.*}}, ptr @__llvm_internal_gcov_emit_file_info, i32 0, i32 %[[IV]]
; GCDA-NEXT: %[[START_FILE_ARGS:.*]] = getelementptr inbounds {{.*}}, ptr %[[FILE_INFO]], i32 0, i32 0
; GCDA-NEXT: %[[START_FILE_ARG_0_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[START_FILE_ARGS]], i32 0, i32 0
; GCDA-NEXT: %[[START_FILE_ARG_0:.*]] = load ptr, ptr %[[START_FILE_ARG_0_PTR]]
; GCDA-NEXT: %[[START_FILE_ARG_1_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[START_FILE_ARGS]], i32 0, i32 1
; GCDA-NEXT: %[[START_FILE_ARG_1:.*]] = load i32, ptr %[[START_FILE_ARG_1_PTR]]
; GCDA-NEXT: %[[START_FILE_ARG_2_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[START_FILE_ARGS]], i32 0, i32 2
; GCDA-NEXT: %[[START_FILE_ARG_2:.*]] = load i32, ptr %[[START_FILE_ARG_2_PTR]]
; GCDA-NEXT: call void @llvm_gcda_start_file(ptr %[[START_FILE_ARG_0]], i32 %[[START_FILE_ARG_1]], i32 %[[START_FILE_ARG_2]])
; GCDA-NEXT: %[[NUM_COUNTERS_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[FILE_INFO]], i32 0, i32 1
; GCDA-NEXT: %[[NUM_COUNTERS:.*]] = load i32, ptr %[[NUM_COUNTERS_PTR]]
; GCDA-NEXT: %[[EMIT_FUN_ARGS_ARRAY_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[FILE_INFO]], i32 0, i32 2
; GCDA-NEXT: %[[EMIT_FUN_ARGS_ARRAY:.*]] = load ptr, ptr %[[EMIT_FUN_ARGS_ARRAY_PTR]]
; GCDA-NEXT: %[[EMIT_ARCS_ARGS_ARRAY_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[FILE_INFO]], i32 0, i32 3
; GCDA-NEXT: %[[EMIT_ARCS_ARGS_ARRAY:.*]] = load ptr, ptr %[[EMIT_ARCS_ARGS_ARRAY_PTR]]
; GCDA-NEXT: %[[ENTER_COUNTER_LOOP_COND:.*]] = icmp slt i32 0, %[[NUM_COUNTERS]]
; GCDA-NEXT: br i1 %[[ENTER_COUNTER_LOOP_COND]], label %[[COUNTER_LOOP:.*]], label %[[FILE_LOOP_LATCH]]
;
; GCDA: [[COUNTER_LOOP]]:
; GCDA-NEXT: %[[JV:.*]] = phi i32 [ 0, %[[FILE_LOOP_HEADER]] ], [ %[[NEXT_JV:.*]], %[[COUNTER_LOOP]] ]
; GCDA-NEXT: %[[EMIT_FUN_ARGS:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[EMIT_FUN_ARGS_ARRAY]], i32 %[[JV]]
; GCDA-NEXT: %[[EMIT_FUN_ARG_0_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[EMIT_FUN_ARGS]], i32 0, i32 0
; GCDA-NEXT: %[[EMIT_FUN_ARG_0:.*]] = load i32, i32* %[[EMIT_FUN_ARG_0_PTR]]
; GCDA-NEXT: %[[EMIT_FUN_ARG_1_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[EMIT_FUN_ARGS]], i32 0, i32 1
; GCDA-NEXT: %[[EMIT_FUN_ARG_1:.*]] = load i32, i32* %[[EMIT_FUN_ARG_1_PTR]]
; GCDA-NEXT: %[[EMIT_FUN_ARG_2_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[EMIT_FUN_ARGS]], i32 0, i32 2
; GCDA-NEXT: %[[EMIT_FUN_ARG_2:.*]] = load i32, i32* %[[EMIT_FUN_ARG_2_PTR]]
; GCDA-NEXT: %[[EMIT_FUN_ARGS:.*]] = getelementptr inbounds {{.*}}, ptr %[[EMIT_FUN_ARGS_ARRAY]], i32 %[[JV]]
; GCDA-NEXT: %[[EMIT_FUN_ARG_0_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[EMIT_FUN_ARGS]], i32 0, i32 0
; GCDA-NEXT: %[[EMIT_FUN_ARG_0:.*]] = load i32, ptr %[[EMIT_FUN_ARG_0_PTR]]
; GCDA-NEXT: %[[EMIT_FUN_ARG_1_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[EMIT_FUN_ARGS]], i32 0, i32 1
; GCDA-NEXT: %[[EMIT_FUN_ARG_1:.*]] = load i32, ptr %[[EMIT_FUN_ARG_1_PTR]]
; GCDA-NEXT: %[[EMIT_FUN_ARG_2_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[EMIT_FUN_ARGS]], i32 0, i32 2
; GCDA-NEXT: %[[EMIT_FUN_ARG_2:.*]] = load i32, ptr %[[EMIT_FUN_ARG_2_PTR]]
; GCDA-NEXT: call void @llvm_gcda_emit_function(i32 %[[EMIT_FUN_ARG_0]],
; GCDA-SAME: i32 %[[EMIT_FUN_ARG_1]],
; GCDA-SAME: i32 %[[EMIT_FUN_ARG_2]])
; GCDA-NEXT: %[[EMIT_ARCS_ARGS:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[EMIT_ARCS_ARGS_ARRAY]], i32 %[[JV]]
; GCDA-NEXT: %[[EMIT_ARCS_ARG_0_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[EMIT_ARCS_ARGS]], i32 0, i32 0
; GCDA-NEXT: %[[EMIT_ARCS_ARG_0:.*]] = load i32, i32* %[[EMIT_ARCS_ARG_0_PTR]]
; GCDA-NEXT: %[[EMIT_ARCS_ARG_1_PTR:.*]] = getelementptr inbounds {{.*}}, {{.*}}* %[[EMIT_ARCS_ARGS]], i32 0, i32 1
; GCDA-NEXT: %[[EMIT_ARCS_ARG_1:.*]] = load i64*, i64** %[[EMIT_ARCS_ARG_1_PTR]]
; GCDA-NEXT: %[[EMIT_ARCS_ARGS:.*]] = getelementptr inbounds {{.*}}, ptr %[[EMIT_ARCS_ARGS_ARRAY]], i32 %[[JV]]
; GCDA-NEXT: %[[EMIT_ARCS_ARG_0_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[EMIT_ARCS_ARGS]], i32 0, i32 0
; GCDA-NEXT: %[[EMIT_ARCS_ARG_0:.*]] = load i32, ptr %[[EMIT_ARCS_ARG_0_PTR]]
; GCDA-NEXT: %[[EMIT_ARCS_ARG_1_PTR:.*]] = getelementptr inbounds {{.*}}, ptr %[[EMIT_ARCS_ARGS]], i32 0, i32 1
; GCDA-NEXT: %[[EMIT_ARCS_ARG_1:.*]] = load ptr, ptr %[[EMIT_ARCS_ARG_1_PTR]]
; GCDA-NEXT: call void @llvm_gcda_emit_arcs(i32 %[[EMIT_ARCS_ARG_0]],
; GCDA-SAME: i64* %[[EMIT_ARCS_ARG_1]])
; GCDA-SAME: ptr %[[EMIT_ARCS_ARG_1]])
; GCDA-NEXT: %[[NEXT_JV]] = add i32 %[[JV]], 1
; GCDA-NEXT: %[[COUNTER_LOOP_COND:.*]] = icmp slt i32 %[[NEXT_JV]], %[[NUM_COUNTERS]]
; GCDA-NEXT: br i1 %[[COUNTER_LOOP_COND]], label %[[COUNTER_LOOP]], label %[[FILE_LOOP_LATCH]]
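Read off the GEP indices above, the per-file record that the GCDA lowering iterates over appears to have roughly the following shape. The type and field names are editorial guesses for illustration only, not the pass's actual identifiers:

%start_file_args_ty    = type { ptr, i32, i32 }   ; presumably filename, version, checksum
%emit_function_args_ty = type { i32, i32, i32 }
%emit_arcs_args_ty     = type { i32, ptr }        ; counter count and a pointer to the counters
%file_info_ty          = type { %start_file_args_ty, i32, ptr, ptr }
; fields 2 and 3 point at per-function arrays of the emit_function/emit_arcs argument structs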

View File

@ -12,9 +12,9 @@ define dso_local i32 @no_instr(i32 %a) noprofile !dbg !9 {
define dso_local i32 @instr(i32 %a) !dbg !28 {
; CHECK-LABEL: @instr(
; CHECK-NEXT: [[GCOV_CTR:%.*]] = load i64, i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__llvm_gcov_ctr, i64 0, i64 0), align 4, !dbg [[DBG8:![0-9]+]]
; CHECK-NEXT: [[GCOV_CTR:%.*]] = load i64, ptr @__llvm_gcov_ctr, align 4, !dbg [[DBG8:![0-9]+]]
; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[GCOV_CTR]], 1, !dbg [[DBG8]]
; CHECK-NEXT: store i64 [[TMP1]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @__llvm_gcov_ctr, i64 0, i64 0), align 4, !dbg [[DBG8]]
; CHECK-NEXT: store i64 [[TMP1]], ptr @__llvm_gcov_ctr, align 4, !dbg [[DBG8]]
; CHECK-NEXT: ret i32 42, !dbg [[DBG8]]
;
ret i32 42, !dbg !44

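For contrast with @instr, a function carrying the noprofile attribute (as @no_instr does above) gets no counter code at all; a minimal sketch with an invented name:

; no load/add/store of @__llvm_gcov_ctr is emitted for this body
define i32 @left_alone(i32 %a) noprofile {
  ret i32 %a
}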
View File

@ -15,8 +15,8 @@ entry:
; CHECK: define internal void @__llvm_gcov_reset()
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* bitcast ([1 x i64]* @__llvm_gcov_ctr to i8*), i8 0, i64 8, i1 false)
; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* bitcast ([1 x i64]* @__llvm_gcov_ctr.1 to i8*), i8 0, i64 8, i1 false)
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr @__llvm_gcov_ctr, i8 0, i64 8, i1 false)
; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr @__llvm_gcov_ctr.1, i8 0, i64 8, i1 false)
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4, !5, !6}
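The same change shows up in intrinsic mangling: the pointer parameter used to contribute "p0i8" to the intrinsic's suffix, while an opaque pointer in address space 0 mangles to just "p0". A self-contained sketch (the function name and the 8-byte size are illustrative):

declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1 immarg)

define void @zero_counter(ptr %ctr) {
  ; clear one 8-byte counter slot
  call void @llvm.memset.p0.i64(ptr %ctr, i8 0, i64 8, i1 false)
  ret void
}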

View File

@ -9,7 +9,7 @@ target triple = "x86_64-apple-macosx10.14.0"
; CHECK-LABEL: define {{.*}}@foo.cold.1(
; CHECK: call {{.*}}@sink
; CHECK: %p.ce = phi i32 [ 1, %coldbb ], [ 3, %coldbb2 ]
; CHECK-NEXT: store i32 %p.ce, i32* %p.ce.out
; CHECK-NEXT: store i32 %p.ce, ptr %p.ce.out
define void @foo(i32 %cond) {
entry:

View File

@ -12,10 +12,9 @@ target triple = "x86_64-apple-macosx10.14.0"
; CHECK-NEXT: ]
;
; CHECK: codeRepl:
; CHECK-NEXT: bitcast
; CHECK-NEXT: lifetime.start
; CHECK-NEXT: call void @pluto.cold.1(i1* %tmp8.ce.loc)
; CHECK-NEXT: %tmp8.ce.reload = load i1, i1* %tmp8.ce.loc
; CHECK-NEXT: call void @pluto.cold.1(ptr %tmp8.ce.loc)
; CHECK-NEXT: %tmp8.ce.reload = load i1, ptr %tmp8.ce.loc
; CHECK-NEXT: lifetime.end
; CHECK-NEXT: br label %bb7
;
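The bitcast that used to be checked at the top of codeRepl disappears because llvm.lifetime.start/end now take the alloca's ptr directly; a minimal sketch of the shape with invented names:

declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
declare void @cold_path(ptr)

define void @code_repl_shape() {
  %out.loc = alloca i1, align 1
  ; the alloca already yields a 'ptr', so no cast precedes the markers
  call void @llvm.lifetime.start.p0(i64 1, ptr %out.loc)
  call void @cold_path(ptr %out.loc)
  %reload = load i1, ptr %out.loc, align 1
  call void @llvm.lifetime.end.p0(i64 1, ptr %out.loc)
  ret void
}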

View File

@ -46,11 +46,10 @@ bb5:
; CHECK-LABEL: @f1(
; CHECK-NEXT: bb1:
; CHECK-NEXT: [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[PHINODE_CE_LOC]] to i8*
; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(i32* [[PHINODE_CE_LOC]], i32 0)
; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, i32* [[PHINODE_CE_LOC]], align 4
; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], i32 0)
; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: br i1 [[TMP0]], label [[BB5:%.*]], label [[BB1_AFTER_OUTLINE:%.*]]
; CHECK: bb1_after_outline:
; CHECK-NEXT: ret void
@ -62,11 +61,10 @@ bb5:
; CHECK-LABEL: @f2(
; CHECK-NEXT: bb1:
; CHECK-NEXT: [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[PHINODE_CE_LOC]] to i8*
; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(i32* [[PHINODE_CE_LOC]], i32 1)
; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, i32* [[PHINODE_CE_LOC]], align 4
; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: [[TMP0:%.*]] = call i1 @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], i32 1)
; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: br i1 [[TMP0]], label [[BB5:%.*]], label [[BB1_AFTER_OUTLINE:%.*]]
; CHECK: bb1_after_outline:
; CHECK-NEXT: ret void
@ -103,10 +101,10 @@ bb5:
; CHECK-NEXT: switch i32 [[TMP1]], label [[FINAL_BLOCK_0:%.*]] [
; CHECK-NEXT: ]
; CHECK: output_block_0_1:
; CHECK-NEXT: store i32 [[PHINODE_CE]], i32* [[TMP0:%.*]], align 4
; CHECK-NEXT: store i32 [[PHINODE_CE]], ptr [[TMP0:%.*]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_1]]
; CHECK: output_block_1_1:
; CHECK-NEXT: store i32 [[TMP7]], i32* [[TMP0]], align 4
; CHECK-NEXT: store i32 [[TMP7]], ptr [[TMP0]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_1]]
; CHECK: final_block_0:
; CHECK-NEXT: ret i1 false
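The outlined callee checked above ends in a small dispatch that stores each call site's live-out value through its ptr argument before returning; a rough sketch (block names follow the checks, the stored constant and exact control flow are illustrative):

define internal i1 @outlined_ir_func_0(ptr %out, i32 %site) {
entry:
  br label %exit
exit:
  switch i32 %site, label %final_block_0 [
    i32 1, label %output_block_1_1
  ]
output_block_1_1:
  ; this call site's version of the live-out value travels back through %out
  store i32 7, ptr %out, align 4
  br label %final_block_1
final_block_1:
  ret i1 true
final_block_0:
  ret i1 false
}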

View File

@ -49,15 +49,13 @@ bb5:
; CHECK-NEXT: bb1:
; CHECK-NEXT: [[PHINODE1_CE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[PHINODE_CE_LOC]] to i8*
; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
; CHECK-NEXT: [[LT_CAST1:%.*]] = bitcast i32* [[PHINODE1_CE_LOC]] to i8*
; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST1]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(i32* [[PHINODE_CE_LOC]], i32* [[PHINODE1_CE_LOC]])
; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, i32* [[PHINODE_CE_LOC]], align 4
; CHECK-NEXT: [[PHINODE1_CE_RELOAD:%.*]] = load i32, i32* [[PHINODE1_CE_LOC]], align 4
; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST1]])
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE1_CE_LOC]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], ptr [[PHINODE1_CE_LOC]])
; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
; CHECK-NEXT: [[PHINODE1_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE1_CE_LOC]], align 4
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE1_CE_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[BB5:%.*]], label [[BB1_AFTER_OUTLINE:%.*]]
; CHECK: bb1_after_outline:
; CHECK-NEXT: ret void
@ -71,15 +69,13 @@ bb5:
; CHECK-NEXT: bb1:
; CHECK-NEXT: [[PHINODE1_CE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[PHINODE_CE_LOC]] to i8*
; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
; CHECK-NEXT: [[LT_CAST1:%.*]] = bitcast i32* [[PHINODE1_CE_LOC]] to i8*
; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST1]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(i32* [[PHINODE_CE_LOC]], i32* [[PHINODE1_CE_LOC]])
; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, i32* [[PHINODE_CE_LOC]], align 4
; CHECK-NEXT: [[PHINODE1_CE_RELOAD:%.*]] = load i32, i32* [[PHINODE1_CE_LOC]], align 4
; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST1]])
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE1_CE_LOC]])
; CHECK-NEXT: [[TARGETBLOCK:%.*]] = call i1 @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], ptr [[PHINODE1_CE_LOC]])
; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
; CHECK-NEXT: [[PHINODE1_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE1_CE_LOC]], align 4
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE1_CE_LOC]])
; CHECK-NEXT: br i1 [[TARGETBLOCK]], label [[BB5:%.*]], label [[BB1_AFTER_OUTLINE:%.*]]
; CHECK: bb1_after_outline:
; CHECK-NEXT: ret void
@ -109,8 +105,8 @@ bb5:
; CHECK-NEXT: [[PHINODE1_CE:%.*]] = phi i32 [ 5, [[BB1_TO_OUTLINE]] ], [ 5, [[BB2]] ]
; CHECK-NEXT: br label [[BB5_EXITSTUB:%.*]]
; CHECK: bb5.exitStub:
; CHECK-NEXT: store i32 [[PHINODE_CE]], i32* [[TMP0:%.*]], align 4
; CHECK-NEXT: store i32 [[PHINODE1_CE]], i32* [[TMP1:%.*]], align 4
; CHECK-NEXT: store i32 [[PHINODE_CE]], ptr [[TMP0:%.*]], align 4
; CHECK-NEXT: store i32 [[PHINODE1_CE]], ptr [[TMP1:%.*]], align 4
; CHECK-NEXT: ret i1 true
; CHECK: bb1_after_outline.exitStub:
; CHECK-NEXT: ret i1 false

View File

@ -42,21 +42,19 @@ bb5:
; CHECK-NEXT: bb1:
; CHECK-NEXT: [[PHINODE_CE_LOC1:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[PHINODE_CE_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[LT_CAST:%.*]] = bitcast i32* [[PHINODE_CE_LOC]] to i8*
; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST]])
; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[PHINODE_CE_LOC]], i32 0)
; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, i32* [[PHINODE_CE_LOC]], align 4
; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST]])
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[PHINODE_CE_LOC]], i32 0)
; CHECK-NEXT: [[PHINODE_CE_RELOAD:%.*]] = load i32, ptr [[PHINODE_CE_LOC]], align 4
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC]])
; CHECK-NEXT: br label [[BB5:%.*]]
; CHECK: placeholder:
; CHECK-NEXT: [[A:%.*]] = sub i32 5, 4
; CHECK-NEXT: br label [[BB5]]
; CHECK: bb3:
; CHECK-NEXT: [[LT_CAST3:%.*]] = bitcast i32* [[PHINODE_CE_LOC1]] to i8*
; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 -1, i8* [[LT_CAST3]])
; CHECK-NEXT: call void @outlined_ir_func_0(i32* [[PHINODE_CE_LOC1]], i32 1)
; CHECK-NEXT: [[PHINODE_CE_RELOAD2:%.*]] = load i32, i32* [[PHINODE_CE_LOC1]], align 4
; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 -1, i8* [[LT_CAST3]])
; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[PHINODE_CE_LOC1]])
; CHECK-NEXT: call void @outlined_ir_func_0(ptr [[PHINODE_CE_LOC1]], i32 1)
; CHECK-NEXT: [[PHINODE_CE_RELOAD2:%.*]] = load i32, ptr [[PHINODE_CE_LOC1]], align 4
; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[PHINODE_CE_LOC1]])
; CHECK-NEXT: br label [[BB5]]
; CHECK: placeholder1:
; CHECK-NEXT: [[B:%.*]] = add i32 5, 4
@ -88,10 +86,10 @@ bb5:
; CHECK-NEXT: i32 1, label [[OUTPUT_BLOCK_1_0:%.*]]
; CHECK-NEXT: ]
; CHECK: output_block_0_0:
; CHECK-NEXT: store i32 [[PHINODE_CE]], i32* [[TMP0:%.*]], align 4
; CHECK-NEXT: store i32 [[PHINODE_CE]], ptr [[TMP0:%.*]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_0]]
; CHECK: output_block_1_0:
; CHECK-NEXT: store i32 [[TMP7]], i32* [[TMP0]], align 4
; CHECK-NEXT: store i32 [[TMP7]], ptr [[TMP0]], align 4
; CHECK-NEXT: br label [[FINAL_BLOCK_0]]
; CHECK: final_block_0:
; CHECK-NEXT: ret void

Some files were not shown because too many files have changed in this diff.