From f7abc8dff6cac3f3a2300c4e748b070402e881e7 Mon Sep 17 00:00:00 2001
From: Evgeniy Stepanov
Date: Mon, 3 Mar 2014 13:52:36 +0000
Subject: [PATCH] [msan] Tests for X86 SIMD bitshift intrinsic support.

llvm-svn: 202713
---
 compiler-rt/lib/msan/tests/msan_test.cc | 96 +++++++++++++++++++++++++
 1 file changed, 96 insertions(+)

diff --git a/compiler-rt/lib/msan/tests/msan_test.cc b/compiler-rt/lib/msan/tests/msan_test.cc
index 81b96e55f911..fb7011eabf92 100644
--- a/compiler-rt/lib/msan/tests/msan_test.cc
+++ b/compiler-rt/lib/msan/tests/msan_test.cc
@@ -62,6 +62,10 @@
 # define MSAN_HAS_M128 0
 #endif
 
+#ifdef __AVX2__
+# include <immintrin.h>
+#endif
+
 static const int kPageSize = 4096;
 
 typedef unsigned char U1;
@@ -3272,6 +3276,98 @@ TEST(MemorySanitizer, UnalignedStore64) {
   EXPECT_POISONED_O(x[11], origin);
 }
 
+namespace {
+typedef U2 V8x16 __attribute__((__vector_size__(16)));
+typedef U4 V4x32 __attribute__((__vector_size__(16)));
+typedef U8 V2x64 __attribute__((__vector_size__(16)));
+typedef U4 V8x32 __attribute__((__vector_size__(32)));
+typedef U8 V4x64 __attribute__((__vector_size__(32)));
+
+
+V8x16 shift_sse2_left_scalar(V8x16 x, U4 y) {
+  return _mm_slli_epi16(x, y);
+}
+
+V8x16 shift_sse2_left(V8x16 x, V8x16 y) {
+  return _mm_sll_epi16(x, y);
+}
+
+TEST(VectorShiftTest, sse2_left_scalar) {
+  V8x16 v = {(U2)(*GetPoisoned<U2>() | 3), (U2)(*GetPoisoned<U2>() | 7), 2, 3,
+             4, 5, 6, 7};
+  V8x16 u = shift_sse2_left_scalar(v, 2);
+  EXPECT_POISONED(u[0]);
+  EXPECT_POISONED(u[1]);
+  EXPECT_NOT_POISONED(u[0] | (~7U));
+  EXPECT_NOT_POISONED(u[1] | (~31U));
+  u[0] = u[1] = 0;
+  EXPECT_NOT_POISONED(u);
+}
+
+TEST(VectorShiftTest, sse2_left_scalar_by_uninit) {
+  V8x16 v = {0, 1, 2, 3, 4, 5, 6, 7};
+  V8x16 u = shift_sse2_left_scalar(v, *GetPoisoned<U4>());
+  EXPECT_POISONED(u[0]);
+  EXPECT_POISONED(u[1]);
+  EXPECT_POISONED(u[2]);
+  EXPECT_POISONED(u[3]);
+  EXPECT_POISONED(u[4]);
+  EXPECT_POISONED(u[5]);
+  EXPECT_POISONED(u[6]);
+  EXPECT_POISONED(u[7]);
+}
+
+TEST(VectorShiftTest, sse2_left) {
+  V8x16 v = {(U2)(*GetPoisoned<U2>() | 3), (U2)(*GetPoisoned<U2>() | 7), 2, 3,
+             4, 5, 6, 7};
+  // Top 64 bits of shift count don't affect the result.
+  V2x64 s = {2, *GetPoisoned<U8>()};
+  V8x16 u = shift_sse2_left(v, s);
+  EXPECT_POISONED(u[0]);
+  EXPECT_POISONED(u[1]);
+  EXPECT_NOT_POISONED(u[0] | (~7U));
+  EXPECT_NOT_POISONED(u[1] | (~31U));
+  u[0] = u[1] = 0;
+  EXPECT_NOT_POISONED(u);
+}
+
+TEST(VectorShiftTest, sse2_left_by_uninit) {
+  V8x16 v = {(U2)(*GetPoisoned<U2>() | 3), (U2)(*GetPoisoned<U2>() | 7), 2, 3,
+             4, 5, 6, 7};
+  V2x64 s = {*GetPoisoned<U8>(), *GetPoisoned<U8>()};
+  V8x16 u = shift_sse2_left(v, s);
+  EXPECT_POISONED(u[0]);
+  EXPECT_POISONED(u[1]);
+  EXPECT_POISONED(u[2]);
+  EXPECT_POISONED(u[3]);
+  EXPECT_POISONED(u[4]);
+  EXPECT_POISONED(u[5]);
+  EXPECT_POISONED(u[6]);
+  EXPECT_POISONED(u[7]);
+}
+
+#ifdef __AVX2__
+V4x32 shift_avx2_left(V4x32 x, V4x32 y) {
+  return _mm_sllv_epi32(x, y);
+}
+// This is variable vector shift that's only available starting with AVX2.
+// V4x32 shift_avx2_left(V4x32 x, V4x32 y) {
+TEST(VectorShiftTest, avx2_left) {
+  V4x32 v = {(U2)(*GetPoisoned<U2>() | 3), (U2)(*GetPoisoned<U2>() | 7), 2, 3};
+  V4x32 s = {2, *GetPoisoned<U4>(), 3, *GetPoisoned<U4>()};
+  V4x32 u = shift_avx2_left(v, s);
+  EXPECT_POISONED(u[0]);
+  EXPECT_NOT_POISONED(u[0] | (~7U));
+  EXPECT_POISONED(u[1]);
+  EXPECT_POISONED(u[1] | (~31U));
+  EXPECT_NOT_POISONED(u[2]);
+  EXPECT_POISONED(u[3]);
+  EXPECT_POISONED(u[3] | (~31U));
+}
+#endif // __AVX2__
+} // namespace
+
+
 TEST(MemorySanitizerDr, StoreInDSOTest) {
   if (!__msan_has_dynamic_component()) return;
   char* s = new char[10];