[msan] Fix llvm.abs.v intrinsic
The last argument of the intrinsic is a boolean flag to control INT_MIN handling and does not affect msan metadata.
commit 4aa6abe4ef (parent 1fd9a146d3)
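For reference, this is how such a call is typically constructed with IRBuilder; a minimal sketch for illustration only (the helper name emitVectorAbs is made up and is not part of this commit):

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"

// Hypothetical helper: build llvm.abs over a vector value. The trailing i1
// operand only chooses whether abs(INT_MIN) is poison; it is a compile-time
// constant rather than program data, so it never carries MSan shadow.
llvm::Value *emitVectorAbs(llvm::IRBuilderBase &B, llvm::Value *V) {
  return B.CreateBinaryIntrinsic(llvm::Intrinsic::abs, V,
                                 /*IsIntMinPoison=*/B.getFalse());
}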
@@ -2638,6 +2638,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
       return false;
 
     unsigned NumArgOperands = I.getNumArgOperands();
+    if (I.getIntrinsicID() == Intrinsic::abs) {
+      assert(NumArgOperands == 2);
+      // The last argument is just a boolean flag.
+      NumArgOperands = 1;
+    }
 
     for (unsigned i = 0; i < NumArgOperands; ++i) {
       Type *Ty = I.getArgOperand(i)->getType();
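The effect of the change above on shadow propagation can be modeled in a few lines; a self-contained sketch of the intended rule (my own illustration, not MSan code):

#include <cstdint>
#include <cstdlib>

// Toy model of an MSan-tracked integer: each value travels with a shadow word
// whose set bits mark uninitialized bits.
struct ShadowedInt {
  int32_t value;
  uint32_t shadow;
};

// abs is a pure elementwise operation, so the result shadow is just the
// operand shadow; the boolean flag contributes nothing to the metadata.
ShadowedInt msanAbs(ShadowedInt x, bool intMinIsPoison) {
  (void)intMinIsPoison;                  // compile-time flag, no shadow
  return {std::abs(x.value), x.shadow};  // propagate the operand's shadow
}

Before this patch the flag's i1 type made the generic per-operand handler bail out, so MSan fell back to its strict check-and-warn instrumentation; that is exactly what the removed lines in the test hunks below checked for.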
@@ -12,18 +12,15 @@ define <4 x i64> @test_mm256_abs_epi8(<4 x i64> noundef %a) local_unnamed_addr #0 {
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
 ; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
-; CHECK: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <32 x i8>
+; CHECK: call void @llvm.donothing()
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <32 x i8>
 ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[A:%.*]] to <32 x i8>
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast <32 x i8> [[TMP2]] to i256
-; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof !2
-; CHECK: call void @__msan_warning_with_origin_noreturn
-; CHECK: unreachable
-; CHECK: [[TMP7:%.*]] = tail call <32 x i8> @llvm.abs.v32i8(<32 x i8> [[TMP3]], i1 false)
-; CHECK-NEXT: [[TMP8:%.*]] = bitcast <32 x i8> [[TMP7]] to <4 x i64>
-; CHECK-NEXT: store <4 x i64> zeroinitializer, <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
-; ORIGIN-NEXT: store i32 0, i32* @__msan_retval_origin_tls, align 4
-; CHECK: ret <4 x i64> [[TMP8]]
+; CHECK-NEXT: [[TMP4:%.*]] = tail call <32 x i8> @llvm.abs.v32i8(<32 x i8> [[TMP3]], i1 false)
+; CHECK-NEXT: [[TMP5:%.*]] = bitcast <32 x i8> [[TMP2]] to <4 x i64>
+; CHECK-NEXT: [[TMP6:%.*]] = bitcast <32 x i8> [[TMP4]] to <4 x i64>
+; CHECK-NEXT: store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; ORIGIN-NEXT: store i32 [[TMP1]], i32* @__msan_retval_origin_tls, align 4
+; CHECK: ret <4 x i64> [[TMP6]]
 ;
 entry:
   %0 = bitcast <4 x i64> %a to <32 x i8>
@@ -37,18 +34,15 @@ define <4 x i64> @test_mm256_abs_epi16(<4 x i64> %a) local_unnamed_addr #0 {
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
 ; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
-; CHECK: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <16 x i16>
+; CHECK: call void @llvm.donothing()
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <16 x i16>
 ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[A:%.*]] to <16 x i16>
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i16> [[TMP2]] to i256
-; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof !2
-; CHECK: call void @__msan_warning_with_origin_noreturn
-; CHECK: unreachable
-; CHECK: [[TMP7:%.*]] = tail call <16 x i16> @llvm.abs.v16i16(<16 x i16> [[TMP3]], i1 false)
-; CHECK-NEXT: [[TMP8:%.*]] = bitcast <16 x i16> [[TMP7]] to <4 x i64>
-; CHECK-NEXT: store <4 x i64> zeroinitializer, <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
-; ORIGIN-NEXT: store i32 0, i32* @__msan_retval_origin_tls, align 4
-; CHECK: ret <4 x i64> [[TMP8]]
+; CHECK-NEXT: [[TMP4:%.*]] = tail call <16 x i16> @llvm.abs.v16i16(<16 x i16> [[TMP3]], i1 false)
+; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i16> [[TMP2]] to <4 x i64>
+; CHECK-NEXT: [[TMP6:%.*]] = bitcast <16 x i16> [[TMP4]] to <4 x i64>
+; CHECK-NEXT: store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; ORIGIN-NEXT: store i32 [[TMP1]], i32* @__msan_retval_origin_tls, align 4
+; CHECK: ret <4 x i64> [[TMP6]]
 ;
 entry:
   %0 = bitcast <4 x i64> %a to <16 x i16>
@@ -62,18 +56,15 @@ define <4 x i64> @test_mm256_abs_epi32(<4 x i64> %a) local_unnamed_addr #0 {
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <4 x i64>*), align 8
 ; ORIGIN-NEXT: [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
-; CHECK: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <8 x i32>
+; CHECK: call void @llvm.donothing()
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <8 x i32>
 ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64> [[A:%.*]] to <8 x i32>
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP2]] to i256
-; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
-; CHECK-NEXT: br i1 [[_MSCMP]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof !2
-; CHECK: call void @__msan_warning_with_origin_noreturn
-; CHECK: unreachable
-; CHECK: [[TMP7:%.*]] = tail call <8 x i32> @llvm.abs.v8i32(<8 x i32> [[TMP3]], i1 false)
-; CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x i32> [[TMP7]] to <4 x i64>
-; CHECK-NEXT: store <4 x i64> zeroinitializer, <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
-; ORIGIN-NEXT: store i32 0, i32* @__msan_retval_origin_tls, align 4
-; CHECK: ret <4 x i64> [[TMP8]]
+; CHECK-NEXT: [[TMP4:%.*]] = tail call <8 x i32> @llvm.abs.v8i32(<8 x i32> [[TMP3]], i1 false)
+; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i32> [[TMP2]] to <4 x i64>
+; CHECK-NEXT: [[TMP6:%.*]] = bitcast <8 x i32> [[TMP4]] to <4 x i64>
+; CHECK-NEXT: store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; ORIGIN-NEXT: store i32 [[TMP1]], i32* @__msan_retval_origin_tls, align 4
+; CHECK: ret <4 x i64> [[TMP6]]
 ;
 entry:
   %0 = bitcast <4 x i64> %a to <8 x i32>