; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck -check-prefix=CHECK -check-prefix=CHECK-ORIGINS %s
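
; Note on reproducing a RUN line by hand: lit substitutes %s with this file's
; path; assuming the file sits at its usual location in an LLVM checkout (an
; assumption of this note, not something the test depends on), the first RUN
; line expands to roughly:
;   opt < llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll \
;     -msan -msan-check-access-address=0 -S \
;     | FileCheck llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll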

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

; CHECK: @llvm.global_ctors {{.*}} { i32 0, void ()* @msan.module_ctor, i8* null }

; Check the presence and the linkage type of __msan_track_origins and
; other interface symbols.
; CHECK-NOT: @__msan_track_origins
; CHECK-ORIGINS: @__msan_track_origins = weak_odr constant i32 1
; CHECK-NOT: @__msan_keep_going = weak_odr constant i32 0
; CHECK: @__msan_retval_tls = external thread_local(initialexec) global [{{.*}}]
; CHECK: @__msan_retval_origin_tls = external thread_local(initialexec) global i32
; CHECK: @__msan_param_tls = external thread_local(initialexec) global [{{.*}}]
; CHECK: @__msan_param_origin_tls = external thread_local(initialexec) global [{{.*}}]
; CHECK: @__msan_va_arg_tls = external thread_local(initialexec) global [{{.*}}]
; CHECK: @__msan_va_arg_overflow_size_tls = external thread_local(initialexec) global i64
; CHECK: @__msan_origin_tls = external thread_local(initialexec) global i32

; Check instrumentation of stores

define void @Store(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
entry:
  store i32 %x, i32* %p, align 4
  ret void
}

; CHECK-LABEL: @Store
; CHECK: load {{.*}} @__msan_param_tls
; CHECK-ORIGINS: load {{.*}} @__msan_param_origin_tls
; CHECK: store
; CHECK-ORIGINS: icmp
; CHECK-ORIGINS: br i1
; CHECK-ORIGINS: <label>
; CHECK-ORIGINS: store
; CHECK-ORIGINS: br label
; CHECK-ORIGINS: <label>
; CHECK: store
; CHECK: ret void
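
; For orientation, a rough sketch (illustrative only; the exact shadow-address
; constant and instruction sequence are platform- and version-dependent) of the
; instrumented body of @Store: the shadow of %x is read from __msan_param_tls,
; the shadow address is derived from %p via the application-to-shadow mapping,
; and the shadow store sits next to the application store:
;   %sx = load i32, i32* <slot of %x in @__msan_param_tls>
;   %pi = ptrtoint i32* %p to i64
;   %sp = inttoptr i64 <shadow mapping applied to %pi> to i32*
;   store i32 %sx, i32* %sp, align 4
;   store i32 %x, i32* %p, align 4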

; Check instrumentation of aligned stores
; Shadow store has the same alignment as the original store; origin store
; does not specify explicit alignment.

define void @AlignedStore(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
entry:
  store i32 %x, i32* %p, align 32
  ret void
}

; CHECK-LABEL: @AlignedStore
; CHECK: load {{.*}} @__msan_param_tls
; CHECK-ORIGINS: load {{.*}} @__msan_param_origin_tls
; CHECK: store {{.*}} align 32
; CHECK-ORIGINS: icmp
; CHECK-ORIGINS: br i1
; CHECK-ORIGINS: <label>
; CHECK-ORIGINS: store {{.*}} align 32
; CHECK-ORIGINS: br label
; CHECK-ORIGINS: <label>
; CHECK: store {{.*}} align 32
; CHECK: ret void

; load followed by cmp: check that we load the shadow and call __msan_warning.
define void @LoadAndCmp(i32* nocapture %a) nounwind uwtable sanitize_memory {
entry:
  %0 = load i32, i32* %a, align 4
  %tobool = icmp eq i32 %0, 0
  br i1 %tobool, label %if.end, label %if.then

if.then:                                          ; preds = %entry
  tail call void (...) @foo() nounwind
  br label %if.end

if.end:                                           ; preds = %entry, %if.then
  ret void
}

declare void @foo(...)

; CHECK-LABEL: @LoadAndCmp
; CHECK: = load
; CHECK: = load
; CHECK: call void @__msan_warning_noreturn()
; CHECK-NEXT: call void asm sideeffect
; CHECK-NEXT: unreachable
; CHECK: ret void

; Check that we store the shadow for the retval.
define i32 @ReturnInt() nounwind uwtable readnone sanitize_memory {
entry:
  ret i32 123
}

; CHECK-LABEL: @ReturnInt
; CHECK: store i32 0,{{.*}}__msan_retval_tls
; CHECK: ret i32

; Check that we get the shadow for the retval.
define void @CopyRetVal(i32* nocapture %a) nounwind uwtable sanitize_memory {
entry:
  %call = tail call i32 @ReturnInt() nounwind
  store i32 %call, i32* %a, align 4
  ret void
}

; CHECK-LABEL: @CopyRetVal
; CHECK: load{{.*}}__msan_retval_tls
; CHECK: store
; CHECK: store
; CHECK: ret void

; Check that we generate PHIs for shadow.
define void @FuncWithPhi(i32* nocapture %a, i32* %b, i32* nocapture %c) nounwind uwtable sanitize_memory {
entry:
  %tobool = icmp eq i32* %b, null
  br i1 %tobool, label %if.else, label %if.then

if.then:                                          ; preds = %entry
  %0 = load i32, i32* %b, align 4
  br label %if.end

if.else:                                          ; preds = %entry
  %1 = load i32, i32* %c, align 4
  br label %if.end

if.end:                                           ; preds = %if.else, %if.then
  %t.0 = phi i32 [ %0, %if.then ], [ %1, %if.else ]
  store i32 %t.0, i32* %a, align 4
  ret void
}

; CHECK-LABEL: @FuncWithPhi
; CHECK: = phi
; CHECK-NEXT: = phi
; CHECK: store
; CHECK: store
; CHECK: ret void

; Compute shadow for "x << 10"
define void @ShlConst(i32* nocapture %x) nounwind uwtable sanitize_memory {
entry:
  %0 = load i32, i32* %x, align 4
  %1 = shl i32 %0, 10
  store i32 %1, i32* %x, align 4
  ret void
}

; CHECK-LABEL: @ShlConst
; CHECK: = load
; CHECK: = load
; CHECK: shl
; CHECK: shl
; CHECK: store
; CHECK: store
; CHECK: ret void

; Compute shadow for "10 << x": it should have 'sext i1'.
define void @ShlNonConst(i32* nocapture %x) nounwind uwtable sanitize_memory {
entry:
  %0 = load i32, i32* %x, align 4
  %1 = shl i32 10, %0
  store i32 %1, i32* %x, align 4
  ret void
}

; CHECK-LABEL: @ShlNonConst
; CHECK: = load
; CHECK: = load
; CHECK: = sext i1
; CHECK: store
; CHECK: store
; CHECK: ret void
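
; The 'sext i1' checked for above comes from how a shift by a potentially
; uninitialized amount is modeled; roughly (an illustrative sketch, not the
; exact emitted IR):
;   %amt_bad = icmp ne i32 <shadow of %0>, 0    ; is the shift amount poisoned?
;   %all     = sext i1 %amt_bad to i32          ; 0 or all-ones
;   %sres    = or i32 <shifted shadow of 10>, %all
; i.e. if any bit of the shift amount is poisoned, the whole result is poisoned.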

; SExt
define void @SExt(i32* nocapture %a, i16* nocapture %b) nounwind uwtable sanitize_memory {
entry:
  %0 = load i16, i16* %b, align 2
  %1 = sext i16 %0 to i32
  store i32 %1, i32* %a, align 4
  ret void
}

; CHECK-LABEL: @SExt
; CHECK: = load
; CHECK: = load
; CHECK: = sext
; CHECK: = sext
; CHECK: store
; CHECK: store
; CHECK: ret void

; memset
define void @MemSet(i8* nocapture %x) nounwind uwtable sanitize_memory {
entry:
  call void @llvm.memset.p0i8.i64(i8* %x, i8 42, i64 10, i32 1, i1 false)
  ret void
}

declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind

; CHECK-LABEL: @MemSet
; CHECK: call i8* @__msan_memset
; CHECK: ret void

; memcpy
define void @MemCpy(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
entry:
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %x, i8* %y, i64 10, i32 1, i1 false)
  ret void
}

declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind

; CHECK-LABEL: @MemCpy
; CHECK: call i8* @__msan_memcpy
; CHECK: ret void

; memmove is lowered to a call
define void @MemMove(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
entry:
  call void @llvm.memmove.p0i8.p0i8.i64(i8* %x, i8* %y, i64 10, i32 1, i1 false)
  ret void
}

declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind

; CHECK-LABEL: @MemMove
; CHECK: call i8* @__msan_memmove
; CHECK: ret void
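
; None of the mem* intrinsics are instrumented inline; they are rewritten into
; calls to the MSan runtime, which updates the shadow (and origins) alongside
; the data. The runtime entry points have roughly these prototypes (a sketch of
; the interface implied by the checks above, not declarations this test adds):
;   i8* @__msan_memset(i8*, i32, i64)
;   i8* @__msan_memcpy(i8*, i8*, i64)
;   i8* @__msan_memmove(i8*, i8*, i64)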

;; ------------
;; Placeholder tests that will fail once element atomic @llvm.mem[cpy|move|set] intrinsics have
;; been added to the MemIntrinsic class hierarchy. These will act as a reminder to
;; verify that MSAN handles these intrinsics properly once they have been
;; added to that class hierarchy.

declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture writeonly, i8, i64, i32) nounwind
declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind
declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32) nounwind

define void @atomic_memcpy(i8* nocapture %x, i8* nocapture %y) nounwind {
  ; CHECK-LABEL: atomic_memcpy
  ; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
  ; CHECK-NEXT: ret void
  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
  ret void
}

define void @atomic_memmove(i8* nocapture %x, i8* nocapture %y) nounwind {
  ; CHECK-LABEL: atomic_memmove
  ; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
  ; CHECK-NEXT: ret void
  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %x, i8* align 2 %y, i64 16, i32 1)
  ret void
}

define void @atomic_memset(i8* nocapture %x) nounwind {
  ; CHECK-LABEL: atomic_memset
  ; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 88, i64 16, i32 1)
  ; CHECK-NEXT: ret void
  call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %x, i8 88, i64 16, i32 1)
  ret void
}

;; ------------

; Check that we propagate shadow for "select"

define i32 @Select(i32 %a, i32 %b, i1 %c) nounwind uwtable readnone sanitize_memory {
entry:
  %cond = select i1 %c, i32 %a, i32 %b
  ret i32 %cond
}

; CHECK-LABEL: @Select
; CHECK: select i1
; CHECK-DAG: or i32
; CHECK-DAG: xor i32
; CHECK: or i32
; CHECK-DAG: select i1
; CHECK-ORIGINS-DAG: select
; CHECK-ORIGINS-DAG: select
; CHECK-DAG: select i1
; CHECK: store i32{{.*}}@__msan_retval_tls
; CHECK-ORIGINS: store i32{{.*}}@__msan_retval_origin_tls
; CHECK: ret i32
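
; The select/or/xor pattern matched above implements, roughly (a sketch of the
; propagation rule, not the literal IR the pass emits):
;   Sresult = select(c, Sa, Sb) | (Sc ? ((a ^ b) | Sa | Sb) : 0)
; i.e. the chosen operand's shadow is kept, and a poisoned condition poisons
; every bit in which the two operands (or their shadows) may differ.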

; Check that we propagate origin for "select" with vector condition.
; Select condition is flattened to i1, which is then used to select one of the
; argument origins.

define <8 x i16> @SelectVector(<8 x i16> %a, <8 x i16> %b, <8 x i1> %c) nounwind uwtable readnone sanitize_memory {
entry:
  %cond = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %b
  ret <8 x i16> %cond
}

; CHECK-LABEL: @SelectVector
; CHECK: select <8 x i1>
; CHECK-DAG: or <8 x i16>
; CHECK-DAG: xor <8 x i16>
; CHECK: or <8 x i16>
; CHECK-DAG: select <8 x i1>
; CHECK-ORIGINS-DAG: select
; CHECK-ORIGINS-DAG: select
; CHECK-DAG: select <8 x i1>
; CHECK: store <8 x i16>{{.*}}@__msan_retval_tls
; CHECK-ORIGINS: store i32{{.*}}@__msan_retval_origin_tls
; CHECK: ret <8 x i16>

; Check that we propagate origin for "select" with scalar condition and vector
; arguments. Select condition shadow is sign-extended to the vector type and
; mixed into the result shadow.

define <8 x i16> @SelectVector2(<8 x i16> %a, <8 x i16> %b, i1 %c) nounwind uwtable readnone sanitize_memory {
entry:
  %cond = select i1 %c, <8 x i16> %a, <8 x i16> %b
  ret <8 x i16> %cond
}

; CHECK-LABEL: @SelectVector2
; CHECK: select i1
; CHECK-DAG: or <8 x i16>
; CHECK-DAG: xor <8 x i16>
; CHECK: or <8 x i16>
; CHECK-DAG: select i1
; CHECK-ORIGINS-DAG: select i1
; CHECK-ORIGINS-DAG: select i1
; CHECK-DAG: select i1
; CHECK: ret <8 x i16>

define { i64, i64 } @SelectStruct(i1 zeroext %x, { i64, i64 } %a, { i64, i64 } %b) readnone sanitize_memory {
entry:
  %c = select i1 %x, { i64, i64 } %a, { i64, i64 } %b
  ret { i64, i64 } %c
}

; CHECK-LABEL: @SelectStruct
; CHECK: select i1 {{.*}}, { i64, i64 }
; CHECK-NEXT: select i1 {{.*}}, { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 }
; CHECK-ORIGINS: select i1
; CHECK-ORIGINS: select i1
; CHECK-NEXT: select i1 {{.*}}, { i64, i64 }
; CHECK: ret { i64, i64 }

define { i64*, double } @SelectStruct2(i1 zeroext %x, { i64*, double } %a, { i64*, double } %b) readnone sanitize_memory {
entry:
  %c = select i1 %x, { i64*, double } %a, { i64*, double } %b
  ret { i64*, double } %c
}

; CHECK-LABEL: @SelectStruct2
; CHECK: select i1 {{.*}}, { i64, i64 }
; CHECK-NEXT: select i1 {{.*}}, { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 }
; CHECK-ORIGINS: select i1
; CHECK-ORIGINS: select i1
; CHECK-NEXT: select i1 {{.*}}, { i64*, double }
; CHECK: ret { i64*, double }

define i8* @IntToPtr(i64 %x) nounwind uwtable readnone sanitize_memory {
entry:
  %0 = inttoptr i64 %x to i8*
  ret i8* %0
}

; CHECK-LABEL: @IntToPtr
; CHECK: load i64, i64*{{.*}}__msan_param_tls
; CHECK-ORIGINS-NEXT: load i32, i32*{{.*}}__msan_param_origin_tls
; CHECK-NEXT: inttoptr
; CHECK-NEXT: store i64{{.*}}__msan_retval_tls
; CHECK: ret i8*

define i8* @IntToPtr_ZExt(i16 %x) nounwind uwtable readnone sanitize_memory {
entry:
  %0 = inttoptr i16 %x to i8*
  ret i8* %0
}

; CHECK-LABEL: @IntToPtr_ZExt
; CHECK: load i16, i16*{{.*}}__msan_param_tls
; CHECK: zext
; CHECK-NEXT: inttoptr
; CHECK-NEXT: store i64{{.*}}__msan_retval_tls
; CHECK: ret i8*

; Check that we insert exactly one check on udiv
; (2nd arg shadow is checked, 1st arg shadow is propagated)

define i32 @Div(i32 %a, i32 %b) nounwind uwtable readnone sanitize_memory {
entry:
  %div = udiv i32 %a, %b
  ret i32 %div
}

; CHECK-LABEL: @Div
; CHECK: icmp
; CHECK: call void @__msan_warning
; CHECK-NOT: icmp
; CHECK: udiv
; CHECK-NOT: icmp
; CHECK: ret i32

; Check that we propagate shadow for x<0, x>=0, etc (i.e. sign bit tests)

define zeroext i1 @ICmpSLTZero(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp slt i32 %x, 0
  ret i1 %1
}

; CHECK-LABEL: @ICmpSLTZero
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1
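
; Why no warning call is needed here (a sketch of the idea, not the literal
; IR): for a signed comparison of %x against 0 or -1 only the sign bit of %x
; can change the result, so the result shadow is computed exactly instead of
; reporting on a poisoned %x:
;   %sx = load i32 {{.*}}          ; shadow of %x from __msan_param_tls
;   %sr = icmp slt i32 %sx, 0      ; result poisoned iff the sign bit of %x is
;   %r  = icmp slt i32 %x, 0       ; the original comparison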

define zeroext i1 @ICmpSGEZero(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp sge i32 %x, 0
  ret i1 %1
}

; CHECK-LABEL: @ICmpSGEZero
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp sge
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1

define zeroext i1 @ICmpSGTZero(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp sgt i32 0, %x
  ret i1 %1
}

; CHECK-LABEL: @ICmpSGTZero
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp sgt
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1

define zeroext i1 @ICmpSLEZero(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp sle i32 0, %x
  ret i1 %1
}

; CHECK-LABEL: @ICmpSLEZero
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp sle
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1

; Check that we propagate shadow for x<=-1, x>-1, etc (i.e. sign bit tests)

define zeroext i1 @ICmpSLTAllOnes(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp slt i32 -1, %x
  ret i1 %1
}

; CHECK-LABEL: @ICmpSLTAllOnes
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1

define zeroext i1 @ICmpSGEAllOnes(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp sge i32 -1, %x
  ret i1 %1
}

; CHECK-LABEL: @ICmpSGEAllOnes
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp sge
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1

define zeroext i1 @ICmpSGTAllOnes(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp sgt i32 %x, -1
  ret i1 %1
}

; CHECK-LABEL: @ICmpSGTAllOnes
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp sgt
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1

define zeroext i1 @ICmpSLEAllOnes(i32 %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp sle i32 %x, -1
  ret i1 %1
}

; CHECK-LABEL: @ICmpSLEAllOnes
; CHECK: icmp slt
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp sle
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1

; Check that we propagate shadow for x<0, x>=0, etc (i.e. sign bit tests)
; of the vector arguments.

define <2 x i1> @ICmpSLT_vector_Zero(<2 x i32*> %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp slt <2 x i32*> %x, zeroinitializer
  ret <2 x i1> %1
}

; CHECK-LABEL: @ICmpSLT_vector_Zero
; CHECK: icmp slt <2 x i64>
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp slt <2 x i32*>
; CHECK-NOT: call void @__msan_warning
; CHECK: ret <2 x i1>

; Check that we propagate shadow for x<=-1, x>0, etc (i.e. sign bit tests)
; of the vector arguments.

define <2 x i1> @ICmpSLT_vector_AllOnes(<2 x i32> %x) nounwind uwtable readnone sanitize_memory {
  %1 = icmp slt <2 x i32> <i32 -1, i32 -1>, %x
  ret <2 x i1> %1
}

; CHECK-LABEL: @ICmpSLT_vector_AllOnes
; CHECK: icmp slt <2 x i32>
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp slt <2 x i32>
; CHECK-NOT: call void @__msan_warning
; CHECK: ret <2 x i1>

; Check that we propagate shadow for unsigned relational comparisons with
; constants

define zeroext i1 @ICmpUGTConst(i32 %x) nounwind uwtable readnone sanitize_memory {
entry:
  %cmp = icmp ugt i32 %x, 7
  ret i1 %cmp
}

; CHECK-LABEL: @ICmpUGTConst
; CHECK: icmp ugt i32
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp ugt i32
; CHECK-NOT: call void @__msan_warning
; CHECK: icmp ugt i32
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i1
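
; The three "icmp ugt" instructions matched above reflect the propagation rule
; for relational comparisons against a constant; roughly (a sketch, not the
; exact IR): compare the lowest and the highest value %x could have given its
; shadow,
;   %lo = icmp ugt i32 (and %x, (not %sx)), 7   ; all poisoned bits cleared
;   %hi = icmp ugt i32 (or  %x, %sx), 7         ; all poisoned bits set
; and the result shadow is set only where the two disagree (%lo xor %hi), so no
; warning call is required for the comparison itself.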

; Check that loads of shadow have the same alignment as the original loads.
; Check that loads of origin have the alignment of max(4, original alignment).

define i32 @ShadowLoadAlignmentLarge() nounwind uwtable sanitize_memory {
  %y = alloca i32, align 64
  %1 = load volatile i32, i32* %y, align 64
  ret i32 %1
}

; CHECK-LABEL: @ShadowLoadAlignmentLarge
; CHECK: load volatile i32, i32* {{.*}} align 64
; CHECK: load i32, i32* {{.*}} align 64
; CHECK: ret i32

define i32 @ShadowLoadAlignmentSmall() nounwind uwtable sanitize_memory {
  %y = alloca i32, align 2
  %1 = load volatile i32, i32* %y, align 2
  ret i32 %1
}

; CHECK-LABEL: @ShadowLoadAlignmentSmall
; CHECK: load volatile i32, i32* {{.*}} align 2
; CHECK: load i32, i32* {{.*}} align 2
; CHECK-ORIGINS: load i32, i32* {{.*}} align 4
; CHECK: ret i32

; Test vector manipulation instructions.
; Check that the same bit manipulation is applied to the shadow values.
; Check that there is a zero test of the shadow of %idx argument, where present.

define i32 @ExtractElement(<4 x i32> %vec, i32 %idx) sanitize_memory {
  %x = extractelement <4 x i32> %vec, i32 %idx
  ret i32 %x
}

; CHECK-LABEL: @ExtractElement
; CHECK: extractelement
; CHECK: call void @__msan_warning
; CHECK: extractelement
; CHECK: ret i32

define <4 x i32> @InsertElement(<4 x i32> %vec, i32 %idx, i32 %x) sanitize_memory {
  %vec1 = insertelement <4 x i32> %vec, i32 %x, i32 %idx
  ret <4 x i32> %vec1
}

; CHECK-LABEL: @InsertElement
; CHECK: insertelement
; CHECK: call void @__msan_warning
; CHECK: insertelement
; CHECK: ret <4 x i32>

define <4 x i32> @ShuffleVector(<4 x i32> %vec, <4 x i32> %vec1) sanitize_memory {
  %vec2 = shufflevector <4 x i32> %vec, <4 x i32> %vec1,
                        <4 x i32> <i32 0, i32 4, i32 1, i32 5>
  ret <4 x i32> %vec2
}

; CHECK-LABEL: @ShuffleVector
; CHECK: shufflevector
; CHECK-NOT: call void @__msan_warning
; CHECK: shufflevector
; CHECK: ret <4 x i32>

; Test bswap intrinsic instrumentation
define i32 @BSwap(i32 %x) nounwind uwtable readnone sanitize_memory {
  %y = tail call i32 @llvm.bswap.i32(i32 %x)
  ret i32 %y
}

declare i32 @llvm.bswap.i32(i32) nounwind readnone

; CHECK-LABEL: @BSwap
; CHECK-NOT: call void @__msan_warning
; CHECK: @llvm.bswap.i32
; CHECK-NOT: call void @__msan_warning
; CHECK: @llvm.bswap.i32
; CHECK-NOT: call void @__msan_warning
; CHECK: ret i32

; Test handling of vectors of pointers.
; Check that shadow of such vector is a vector of integers.

define <8 x i8*> @VectorOfPointers(<8 x i8*>* %p) nounwind uwtable sanitize_memory {
  %x = load <8 x i8*>, <8 x i8*>* %p
  ret <8 x i8*> %x
}

; CHECK-LABEL: @VectorOfPointers
; CHECK: load <8 x i8*>, <8 x i8*>*
; CHECK: load <8 x i64>, <8 x i64>*
; CHECK: store <8 x i64> {{.*}} @__msan_retval_tls
; CHECK: ret <8 x i8*>

; Test handling of va_copy.

declare void @llvm.va_copy(i8*, i8*) nounwind

define void @VACopy(i8* %p1, i8* %p2) nounwind uwtable sanitize_memory {
  call void @llvm.va_copy(i8* %p1, i8* %p2) nounwind
  ret void
}

; CHECK-LABEL: @VACopy
; CHECK: call void @llvm.memset.p0i8.i64({{.*}}, i8 0, i64 24, i32 8, i1 false)
; CHECK: ret void

; Test that va_start instrumentation does not use va_arg_tls*.
; It should work with a local stack copy instead.

%struct.__va_list_tag = type { i32, i32, i8*, i8* }
declare void @llvm.va_start(i8*) nounwind

; Function Attrs: nounwind uwtable
define void @VAStart(i32 %x, ...) sanitize_memory {
entry:
  %x.addr = alloca i32, align 4
  %va = alloca [1 x %struct.__va_list_tag], align 16
  store i32 %x, i32* %x.addr, align 4
  %arraydecay = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %va, i32 0, i32 0
  %arraydecay1 = bitcast %struct.__va_list_tag* %arraydecay to i8*
  call void @llvm.va_start(i8* %arraydecay1)
  ret void
}

; CHECK-LABEL: @VAStart
; CHECK: call void @llvm.va_start
; CHECK-NOT: @__msan_va_arg_tls
; CHECK-NOT: @__msan_va_arg_overflow_size_tls
; CHECK: ret void

; Test handling of volatile stores.
; Check that MemorySanitizer does not add a check of the value being stored.

define void @VolatileStore(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
entry:
  store volatile i32 %x, i32* %p, align 4
  ret void
}

; CHECK-LABEL: @VolatileStore
; CHECK-NOT: @__msan_warning
; CHECK: ret void

; Test that checks are omitted and returned value is always initialized if
; sanitize_memory attribute is missing.

define i32 @NoSanitizeMemory(i32 %x) uwtable {
entry:
  %tobool = icmp eq i32 %x, 0
  br i1 %tobool, label %if.end, label %if.then

if.then:                                          ; preds = %entry
  tail call void @bar()
  br label %if.end

if.end:                                           ; preds = %entry, %if.then
  ret i32 %x
}

declare void @bar()

; CHECK-LABEL: @NoSanitizeMemory
; CHECK-NOT: @__msan_warning
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK-NOT: @__msan_warning
; CHECK: ret i32

; Test that stack allocations are unpoisoned in functions missing
; sanitize_memory attribute

define i32 @NoSanitizeMemoryAlloca() {
entry:
  %p = alloca i32, align 4
  %x = call i32 @NoSanitizeMemoryAllocaHelper(i32* %p)
  ret i32 %x
}

declare i32 @NoSanitizeMemoryAllocaHelper(i32* %p)

; CHECK-LABEL: @NoSanitizeMemoryAlloca
; CHECK: call void @llvm.memset.p0i8.i64(i8* {{.*}}, i8 0, i64 4, i32 4, i1 false)
; CHECK: call i32 @NoSanitizeMemoryAllocaHelper(i32*
; CHECK: ret i32

; Test that undef is unpoisoned in functions missing
; sanitize_memory attribute

define i32 @NoSanitizeMemoryUndef() {
entry:
  %x = call i32 @NoSanitizeMemoryUndefHelper(i32 undef)
  ret i32 %x
}

declare i32 @NoSanitizeMemoryUndefHelper(i32 %x)

; CHECK-LABEL: @NoSanitizeMemoryUndef
; CHECK: store i32 0, i32* {{.*}} @__msan_param_tls
; CHECK: call i32 @NoSanitizeMemoryUndefHelper(i32 undef)
; CHECK: ret i32

; Test PHINode instrumentation in blacklisted functions

define i32 @NoSanitizeMemoryPHI(i32 %x) {
entry:
  %tobool = icmp ne i32 %x, 0
  br i1 %tobool, label %cond.true, label %cond.false

cond.true:                                        ; preds = %entry
  br label %cond.end

cond.false:                                       ; preds = %entry
  br label %cond.end

cond.end:                                         ; preds = %cond.false, %cond.true
  %cond = phi i32 [ undef, %cond.true ], [ undef, %cond.false ]
  ret i32 %cond
}

; CHECK: [[A:%.*]] = phi i32 [ undef, %cond.true ], [ undef, %cond.false ]
; CHECK: store i32 0, i32* bitcast {{.*}} @__msan_retval_tls
; CHECK: ret i32 [[A]]

; Test that there are no __msan_param_origin_tls stores when
; argument shadow is a compile-time zero constant (which is always the case
; in functions missing sanitize_memory attribute).

define i32 @NoSanitizeMemoryParamTLS(i32* nocapture readonly %x) {
entry:
  %0 = load i32, i32* %x, align 4
  %call = tail call i32 @NoSanitizeMemoryParamTLSHelper(i32 %0)
  ret i32 %call
}

declare i32 @NoSanitizeMemoryParamTLSHelper(i32 %x)

; CHECK-LABEL: define i32 @NoSanitizeMemoryParamTLS(
; CHECK-NOT: __msan_param_origin_tls
; CHECK: ret i32

; Test argument shadow alignment

define <2 x i64> @ArgumentShadowAlignment(i64 %a, <2 x i64> %b) sanitize_memory {
entry:
  ret <2 x i64> %b
}

; CHECK-LABEL: @ArgumentShadowAlignment
; CHECK: load <2 x i64>, <2 x i64>* {{.*}} @__msan_param_tls {{.*}}, align 8
; CHECK: store <2 x i64> {{.*}} @__msan_retval_tls {{.*}}, align 8
; CHECK: ret <2 x i64>

; Test origin propagation for insertvalue

define { i64, i32 } @make_pair_64_32(i64 %x, i32 %y) sanitize_memory {
entry:
  %a = insertvalue { i64, i32 } undef, i64 %x, 0
  %b = insertvalue { i64, i32 } %a, i32 %y, 1
  ret { i64, i32 } %b
}

; CHECK-ORIGINS: @make_pair_64_32
; First element shadow
; CHECK-ORIGINS: insertvalue { i64, i32 } { i64 -1, i32 -1 }, i64 {{.*}}, 0
; First element origin
; CHECK-ORIGINS: icmp ne i64
; CHECK-ORIGINS: select i1
; First element app value
; CHECK-ORIGINS: insertvalue { i64, i32 } undef, i64 {{.*}}, 0
; Second element shadow
; CHECK-ORIGINS: insertvalue { i64, i32 } {{.*}}, i32 {{.*}}, 1
; Second element origin
; CHECK-ORIGINS: icmp ne i32
; CHECK-ORIGINS: select i1
; Second element app value
; CHECK-ORIGINS: insertvalue { i64, i32 } {{.*}}, i32 {{.*}}, 1
; CHECK-ORIGINS: ret { i64, i32 }

; Test shadow propagation for aggregates passed through ellipsis.

%struct.StructByVal = type { i32, i32, i32, i32 }

declare void @VAArgStructFn(i32 %guard, ...)

define void @VAArgStruct(%struct.StructByVal* nocapture %s) sanitize_memory {
entry:
  %agg.tmp2 = alloca %struct.StructByVal, align 8
  %0 = bitcast %struct.StructByVal* %s to i8*
  %agg.tmp.sroa.0.0..sroa_cast = bitcast %struct.StructByVal* %s to i64*
  %agg.tmp.sroa.0.0.copyload = load i64, i64* %agg.tmp.sroa.0.0..sroa_cast, align 4
  %agg.tmp.sroa.2.0..sroa_idx = getelementptr inbounds %struct.StructByVal, %struct.StructByVal* %s, i64 0, i32 2
  %agg.tmp.sroa.2.0..sroa_cast = bitcast i32* %agg.tmp.sroa.2.0..sroa_idx to i64*
  %agg.tmp.sroa.2.0.copyload = load i64, i64* %agg.tmp.sroa.2.0..sroa_cast, align 4
  %1 = bitcast %struct.StructByVal* %agg.tmp2 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %0, i64 16, i32 4, i1 false)
  call void (i32, ...) @VAArgStructFn(i32 undef, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, %struct.StructByVal* byval align 8 %agg.tmp2)
  ret void
}

; "undef" and the first 2 structs go to general purpose registers;
; the third struct goes to the overflow area byval

; CHECK-LABEL: @VAArgStruct
; undef not stored to __msan_va_arg_tls - it's a fixed argument
; first struct through general purpose registers
; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 8){{.*}}, align 8
; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 16){{.*}}, align 8
; second struct through general purpose registers
; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 24){{.*}}, align 8
; CHECK: store i64 {{.*}}, i64* {{.*}}@__msan_va_arg_tls{{.*}}, i64 32){{.*}}, align 8
; third struct through the overflow area byval
; CHECK: ptrtoint %struct.StructByVal* {{.*}} to i64
; CHECK: bitcast { i32, i32, i32, i32 }* {{.*}}@__msan_va_arg_tls {{.*}}, i64 176
; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64
; CHECK: store i64 16, i64* @__msan_va_arg_overflow_size_tls
; CHECK: call void (i32, ...) @VAArgStructFn
; CHECK: ret void
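
; A note on the offsets matched above (they follow the SysV x86_64 register
; save area layout that the va-arg helper mirrors; this comment is explanatory
; only and asserts nothing beyond the check lines themselves): offset 0 of
; __msan_va_arg_tls corresponds to the fixed i32 %guard, so the four i64 halves
; of the two register-passed structs land at offsets 8, 16, 24 and 32, and the
; byval struct's shadow is copied to offset 176 = 6*8 (general purpose
; registers) + 8*16 (vector registers), where the overflow area begins.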

declare i32 @InnerTailCall(i32 %a)

define void @MismatchedReturnTypeTailCall(i32 %a) sanitize_memory {
  %b = tail call i32 @InnerTailCall(i32 %a)
  ret void
}

; We used to strip off the 'tail' modifier, but now that we unpoison the return
; slot shadow before the call, we don't need to anymore.

; CHECK-LABEL: define void @MismatchedReturnTypeTailCall
; CHECK: tail call i32 @InnerTailCall
; CHECK: ret void

declare i32 @MustTailCall(i32 %a)

define i32 @CallMustTailCall(i32 %a) sanitize_memory {
  %b = musttail call i32 @MustTailCall(i32 %a)
  ret i32 %b
}

; For "musttail" calls we cannot insert any shadow-manipulating code between
; the call and the return instruction. And we don't need to, because everything
; is taken care of in the callee.

; CHECK-LABEL: define i32 @CallMustTailCall
; CHECK: musttail call i32 @MustTailCall
; No instrumentation between call and ret.
; CHECK-NEXT: ret i32

declare i32* @MismatchingMustTailCall(i32 %a)

define i8* @MismatchingCallMustTailCall(i32 %a) sanitize_memory {
  %b = musttail call i32* @MismatchingMustTailCall(i32 %a)
  %c = bitcast i32* %b to i8*
  ret i8* %c
}

; For "musttail" calls we cannot insert any shadow-manipulating code between
; the call and the return instruction. And we don't need to, because everything
; is taken care of in the callee.

; CHECK-LABEL: define i8* @MismatchingCallMustTailCall
; CHECK: musttail call i32* @MismatchingMustTailCall
; No instrumentation between call and ret.
; CHECK-NEXT: bitcast i32* {{.*}} to i8*
; CHECK-NEXT: ret i8*

; CHECK-LABEL: define internal void @msan.module_ctor() {
; CHECK: call void @__msan_init()