//===-- sanitizer_stacktrace_test.cc --------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "sanitizer_common/sanitizer_common.h"
|
|
|
|
#include "sanitizer_common/sanitizer_stacktrace.h"
|
|
|
|
#include "gtest/gtest.h"
|
|
|
|
|
|
|
|
namespace __sanitizer {
|
|
|
|
|
|
|
|
class FastUnwindTest : public ::testing::Test {
|
|
|
|
protected:
|
|
|
|
virtual void SetUp();
|
2014-10-14 21:46:07 +08:00
|
|
|
virtual void TearDown();
|
2013-11-07 15:28:33 +08:00
|
|
|
bool TryFastUnwind(uptr max_depth) {
|
|
|
|
if (!StackTrace::WillUseFastUnwind(true))
|
|
|
|
return false;
|
2014-02-11 21:45:01 +08:00
|
|
|
trace.Unwind(max_depth, start_pc, (uptr)&fake_stack[0], 0, fake_top,
|
2013-11-07 15:28:33 +08:00
|
|
|
fake_bottom, true);
|
|
|
|
return true;
|
|
|
|
}
|
2013-02-22 23:10:16 +08:00
|
|
|
|
2014-10-14 21:46:07 +08:00
|
|
|
void *mapping;
|
2015-03-04 07:46:40 +08:00
|
|
|
uhwptr *fake_stack;
|
2014-10-14 21:46:07 +08:00
|
|
|
const uptr fake_stack_size = 10;
|
2015-03-04 07:46:40 +08:00
|
|
|
uhwptr start_pc;
|
|
|
|
uhwptr fake_top;
|
|
|
|
uhwptr fake_bottom;
|
2014-10-26 11:35:14 +08:00
|
|
|
BufferedStackTrace trace;
|
2013-02-22 23:10:16 +08:00
|
|
|
};
|
|
|
|
|
2013-04-04 14:52:40 +08:00
|
|
|
// Produces a distinctive, easily recognizable fake program counter for the
// stack slot at |idx|.
static uptr PC(uptr idx) {
  return idx + (1 << 20);
}
|
|
|
|
|
2013-02-22 23:10:16 +08:00
|
|
|
void FastUnwindTest::SetUp() {
|
2014-10-14 21:46:07 +08:00
|
|
|
size_t ps = GetPageSize();
|
|
|
|
mapping = MmapOrDie(2 * ps, "FastUnwindTest");
|
2015-04-10 23:02:19 +08:00
|
|
|
MprotectNoAccess((uptr)mapping, ps);
|
2014-10-14 21:46:07 +08:00
|
|
|
|
|
|
|
// Unwinder may peek 1 word down from the starting FP.
|
2015-03-04 07:46:40 +08:00
|
|
|
fake_stack = (uhwptr *)((uptr)mapping + ps + sizeof(uhwptr));
|
2014-10-14 21:46:07 +08:00
|
|
|
|
2013-02-22 23:10:16 +08:00
|
|
|
// Fill an array of pointers with fake fp+retaddr pairs. Frame pointers have
|
|
|
|
// even indices.
|
2014-10-14 21:46:07 +08:00
|
|
|
for (uptr i = 0; i + 1 < fake_stack_size; i += 2) {
|
2013-02-22 23:10:16 +08:00
|
|
|
fake_stack[i] = (uptr)&fake_stack[i+2]; // fp
|
2013-04-04 14:52:40 +08:00
|
|
|
fake_stack[i+1] = PC(i + 1); // retaddr
|
2013-02-22 23:10:16 +08:00
|
|
|
}
|
2014-09-04 07:46:12 +08:00
|
|
|
// Mark the last fp point back up to terminate the stack trace.
|
2015-03-04 07:46:40 +08:00
|
|
|
fake_stack[RoundDownTo(fake_stack_size - 1, 2)] = (uhwptr)&fake_stack[0];
|
2013-02-22 23:10:16 +08:00
|
|
|
|
|
|
|
// Top is two slots past the end because FastUnwindStack subtracts two.
|
2015-03-04 07:46:40 +08:00
|
|
|
fake_top = (uhwptr)&fake_stack[fake_stack_size + 2];
|
2013-02-22 23:10:16 +08:00
|
|
|
// Bottom is one slot before the start because FastUnwindStack uses >.
|
2015-03-04 07:46:40 +08:00
|
|
|
fake_bottom = (uhwptr)mapping;
|
2013-04-04 14:52:40 +08:00
|
|
|
start_pc = PC(0);
|
2013-02-22 23:10:16 +08:00
|
|
|
}
|
|
|
|
|
2014-10-14 21:46:07 +08:00
|
|
|
void FastUnwindTest::TearDown() {
|
|
|
|
size_t ps = GetPageSize();
|
|
|
|
UnmapOrDie(mapping, 2 * ps);
|
|
|
|
}
|
|
|
|
|
2013-02-22 23:10:16 +08:00
|
|
|
TEST_F(FastUnwindTest, Basic) {
  if (!TryFastUnwind(kStackTraceMax))
    return;
  // Expect start_pc followed by every on-stack return address.
  EXPECT_EQ(6U, trace.size);
  EXPECT_EQ(start_pc, trace.trace[0]);
  for (uptr i = 1; i < 6; i++) {
    EXPECT_EQ(PC(2 * i - 1), trace.trace[i]);
  }
}
|
|
|
|
|
2015-12-05 01:50:03 +08:00
|
|
|
// From: https://github.com/google/sanitizers/issues/162
|
2013-02-22 23:10:16 +08:00
|
|
|
TEST_F(FastUnwindTest, FramePointerLoop) {
|
|
|
|
// Make one fp point to itself.
|
2015-03-04 07:46:40 +08:00
|
|
|
fake_stack[4] = (uhwptr)&fake_stack[4];
|
2013-11-07 15:28:33 +08:00
|
|
|
if (!TryFastUnwind(kStackTraceMax))
|
|
|
|
return;
|
2013-02-22 23:10:16 +08:00
|
|
|
// Should get all on-stack retaddrs up to the 4th slot and start_pc.
|
2013-02-25 17:00:03 +08:00
|
|
|
EXPECT_EQ(4U, trace.size);
|
2013-02-22 23:10:16 +08:00
|
|
|
EXPECT_EQ(start_pc, trace.trace[0]);
|
2013-02-25 22:06:38 +08:00
|
|
|
for (uptr i = 1; i <= 3; i++) {
|
2013-04-04 14:52:40 +08:00
|
|
|
EXPECT_EQ(PC(i*2 - 1), trace.trace[i]);
|
2013-02-22 23:10:16 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-04-04 14:52:40 +08:00
|
|
|
TEST_F(FastUnwindTest, MisalignedFramePointer) {
  // Skew one frame pointer so it is no longer word-aligned.
  fake_stack[4] += 3;
  if (!TryFastUnwind(kStackTraceMax))
    return;
  // The walk must stop at the misaligned frame: start_pc plus the return
  // addresses below the 4th slot.
  EXPECT_EQ(4U, trace.size);
  EXPECT_EQ(start_pc, trace.trace[0]);
  for (uptr i = 1; i < 4; i++) {
    EXPECT_EQ(PC(2 * i - 1), trace.trace[i]);
  }
}
|
|
|
|
|
2013-10-11 17:58:30 +08:00
|
|
|
TEST_F(FastUnwindTest, OneFrameStackTrace) {
  if (!TryFastUnwind(1))
    return;
  // A depth-1 request yields just the starting PC, but the top frame's bp
  // must still be recorded.
  EXPECT_EQ(1U, trace.size);
  EXPECT_EQ(start_pc, trace.trace[0]);
  EXPECT_EQ((uhwptr)&fake_stack[0], trace.top_frame_bp);
}
|
2013-04-04 14:52:40 +08:00
|
|
|
|
2014-03-04 22:06:11 +08:00
|
|
|
TEST_F(FastUnwindTest, ZeroFramesStackTrace) {
  if (!TryFastUnwind(0))
    return;
  // A zero-depth request records nothing at all.
  EXPECT_EQ(0U, trace.size);
  EXPECT_EQ(0U, trace.top_frame_bp);
}
|
|
|
|
|
2014-10-14 21:46:07 +08:00
|
|
|
TEST_F(FastUnwindTest, FPBelowPrevFP) {
  // Point the first frame pointer at unreadable memory that lies inside the
  // stack limits but below the current FP; the walk must stop there.
  fake_stack[0] = (uhwptr)&fake_stack[-50];
  fake_stack[1] = PC(1);
  if (!TryFastUnwind(3))
    return;
  EXPECT_EQ(2U, trace.size);
  EXPECT_EQ(PC(0), trace.trace[0]);
  EXPECT_EQ(PC(1), trace.trace[1]);
}
|
|
|
|
|
2014-03-04 22:06:11 +08:00
|
|
|
TEST(SlowUnwindTest, ShortStackTrace) {
  // Only meaningful when the slow unwinder is in use.
  if (StackTrace::WillUseFastUnwind(false))
    return;
  BufferedStackTrace stack;
  uptr pc = StackTrace::GetCurrentPc();
  uptr bp = GET_CURRENT_FRAME();
  // Depth 0: nothing gets recorded.
  stack.Unwind(0, pc, bp, 0, 0, 0, false);
  EXPECT_EQ(0U, stack.size);
  EXPECT_EQ(0U, stack.top_frame_bp);
  // Depth 1: exactly the requesting frame.
  stack.Unwind(1, pc, bp, 0, 0, 0, false);
  EXPECT_EQ(1U, stack.size);
  EXPECT_EQ(pc, stack.trace[0]);
  EXPECT_EQ(bp, stack.top_frame_bp);
}
|
|
|
|
|
2013-02-22 23:10:16 +08:00
|
|
|
} // namespace __sanitizer
|