[compiler-rt] Enhance unittest coverage for lib interception

Summary:
This patch adds more unit tests covering the interception
of 32-bit code.
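As an illustrative sketch only (not code added by this patch), every new test
follows the same pattern: a hand-written x86 machine-code buffer is copied into
executable memory and OverrideFunction() is asked to patch its entry point via
the TestFunctionPatching() helper introduced below. The kExampleCode array and
test name here are hypothetical; the bytes mirror kPatchableCode2 from the patch:

  // Hypothetical example, mirroring kPatchableCode2 from this patch.
  const u8 kExampleCode[] = {
      0x55,        // push ebp
      0x8B, 0xEC,  // mov ebp, esp
      0x33, 0xC0,  // xor eax, eax
      0x5D,        // pop ebp
      0xC3,        // ret
  };

  TEST(Interception, PatchableExample) {
    // Every instruction above is recognized by RoundUpToInstrBoundary(), so
    // the 32-bit patching path is expected to succeed on this buffer.
    EXPECT_TRUE(TestFunctionPatching(kExampleCode));
  }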

Reviewers: rnk

Subscribers: llvm-commits, wang0109, chrisha

Differential Revision: http://reviews.llvm.org/D22077

llvm-svn: 274775
Etienne Bergeron 2016-07-07 17:51:50 +00:00
parent 1d106c5fc2
commit 31f4672f93
2 changed files with 127 additions and 24 deletions


@@ -129,14 +129,14 @@ static size_t RoundUpToInstrBoundary(size_t size, char *code) {
continue;
}
switch (*(unsigned short*)(code + cursor)) { // NOLINT
switch (*(u16*)(code + cursor)) { // NOLINT
case 0x5540: // 40 55 : rex push rbp
case 0x5340: // 40 53 : rex push rbx
cursor += 2;
continue;
}
switch (0x00FFFFFF & *(unsigned int*)(code + cursor)) {
switch (0x00FFFFFF & *(u32*)(code + cursor)) {
case 0xc18b48: // 48 8b c1 : mov rax, rcx
case 0xc48b48: // 48 8b c4 : mov rax, rsp
case 0xd9f748: // 48 f7 d9 : neg rcx
@@ -171,14 +171,14 @@ static size_t RoundUpToInstrBoundary(size_t size, char *code) {
continue;
}
switch (*(unsigned int*)(code + cursor)) {
switch (*(u32*)(code + cursor)) {
case 0x24448b48: // 48 8b 44 24 XX : mov rax, qword ptr [rsp + 0xXX]
cursor += 5;
continue;
}
// Check first 5 bytes.
switch (0xFFFFFFFFFFull & *(unsigned long long*)(code + cursor)) {
switch (0xFFFFFFFFFFull & *(u64*)(code + cursor)) {
case 0x08245c8948: // 48 89 5c 24 08 : mov QWORD PTR [rsp+0x8], rbx
case 0x1024748948: // 48 89 74 24 10 : mov QWORD PTR [rsp+0x10], rsi
cursor += 5;
@@ -186,7 +186,7 @@ static size_t RoundUpToInstrBoundary(size_t size, char *code) {
}
// Check 8 bytes.
switch (*(unsigned long long*)(code + cursor)) {
switch (*(u64*)(code + cursor)) {
case 0x90909090909006EBull: // JMP +6, 6x NOP
cursor += 8;
continue;
@@ -201,6 +201,28 @@ static size_t RoundUpToInstrBoundary(size_t size, char *code) {
size_t cursor = 0;
while (cursor < size) {
switch (code[cursor]) {
case '\xE8': // E8 XX XX XX XX = call <func>
case '\xE9': // E9 XX XX XX XX = jmp <label>
case '\xC3': // C3 = ret
case '\xEB': // EB XX = jmp XX (short jump)
case '\x70': // 70 XX .. 7F XX = jcc XX (short conditional jump)
case '\x71':
case '\x72':
case '\x73':
case '\x74':
case '\x75':
case '\x76':
case '\x77':
case '\x78':
case '\x79':
case '\x7A':
case '\x7B':
case '\x7C':
case '\x7D':
case '\x7E':
case '\x7F':
return 0;
case '\x50': // push eax
case '\x51': // push ecx
case '\x52': // push edx
@@ -215,15 +237,15 @@ static size_t RoundUpToInstrBoundary(size_t size, char *code) {
case '\x6A': // 6A XX = push XX
cursor += 2;
continue;
case '\xE9': // E9 XX YY ZZ WW = jmp WWZZYYXX
case '\xB8': // B8 XX YY ZZ WW = mov eax, WWZZYYXX
cursor += 5;
continue;
}
switch (*(unsigned short*)(code + cursor)) { // NOLINT
switch (*(u16*)(code + cursor)) { // NOLINT
case 0xFF8B: // 8B FF = mov edi, edi
case 0xEC8B: // 8B EC = mov ebp, esp
case 0xC033: // 33 C0 = xor eax, eax
case 0xC933: // 33 C9 = xor ecx, ecx
cursor += 2;
continue;
case 0x458B: // 8B 45 XX = mov eax, dword ptr [ebp+XXh]
@@ -244,7 +266,7 @@ static size_t RoundUpToInstrBoundary(size_t size, char *code) {
cursor += 4;
continue;
}
switch (0x00FFFFFF & *(unsigned int*)(code + cursor)) {
switch (0x00FFFFFF & *(u32*)(code + cursor)) {
case 0x24448A: // 8A 44 24 XX = mov al, byte ptr [esp+XXh]
case 0x24448B: // 8B 44 24 XX = mov eax, dword ptr [esp+XXh]
case 0x244C8B: // 8B 4C 24 XX = mov ecx, dword ptr [esp+XXh]
@@ -254,7 +276,7 @@ static size_t RoundUpToInstrBoundary(size_t size, char *code) {
cursor += 4;
continue;
}
switch (*(unsigned int *)(code + cursor)) {
switch (*(u32*)(code + cursor)) {
case 0x2444B60F: // 0F B6 44 24 XX = movzx eax, byte ptr [esp+XXh]
cursor += 5;
continue;


@@ -27,7 +27,8 @@ namespace {
typedef int (*IdentityFunction)(int);
#if !SANITIZER_WINDOWS64
u8 kIdentityCodeWithPrologue[] = {
const u8 kIdentityCodeWithPrologue[] = {
0x55, // push ebp
0x8B, 0xEC, // mov ebp,esp
0x8B, 0x45, 0x08, // mov eax,dword ptr [ebp + 8]
@@ -35,7 +36,7 @@ u8 kIdentityCodeWithPrologue[] = {
0xC3, // ret
};
u8 kIdentityCodeWithPushPop[] = {
const u8 kIdentityCodeWithPushPop[] = {
0x55, // push ebp
0x8B, 0xEC, // mov ebp,esp
0x53, // push ebx
@@ -47,28 +48,85 @@ u8 kIdentityCodeWithPushPop[] = {
0xC3, // ret
};
const u8 kPatchableCode1[] = {
0xB8, 0x4B, 0x00, 0x00, 0x00, // mov eax,4B
0x33, 0xC9, // xor ecx,ecx
0xC3, // ret
};
const u8 kPatchableCode2[] = {
0x55, // push ebp
0x8B, 0xEC, // mov ebp,esp
0x33, 0xC0, // xor eax,eax
0x5D, // pop ebp
0xC3, // ret
};
const u8 kPatchableCode3[] = {
0x55, // push ebp
0x8B, 0xEC, // mov ebp,esp
0x6A, 0x00, // push 0
0xE8, 0x3D, 0xFF, 0xFF, 0xFF, // call <func>
};
const u8 kUnpatchableCode1[] = {
0xC3, // ret
};
const u8 kUnpatchableCode2[] = {
0x33, 0xC9, // xor ecx,ecx
0xC3, // ret
};
const u8 kUnpatchableCode3[] = {
0x75, 0xCC, // jne <label>
0x33, 0xC9, // xor ecx,ecx
0xC3, // ret
};
const u8 kUnpatchableCode4[] = {
0x74, 0xCC, // je <label>
0x33, 0xC9, // xor ecx,ecx
0xC3, // ret
};
const u8 kUnpatchableCode5[] = {
0xEB, 0x02, // jmp <label>
0x33, 0xC9, // xor ecx,ecx
0xC3, // ret
};
const u8 kUnpatchableCode6[] = {
0xE9, 0xCC, 0xCC, 0xCC, 0xCC, // jmp <label>
0x90, 0x90, 0x90, 0x90,
};
const u8 kUnpatchableCode7[] = {
0xE8, 0xCC, 0xCC, 0xCC, 0xCC, // call <func>
0x90, 0x90, 0x90, 0x90,
};
#endif
// A buffer holding the dynamically generated code under test.
u8* ActiveCode;
size_t ActiveCodeLength = 4096;
bool LoadActiveCode(u8* Code, size_t CodeLength, uptr* EntryPoint) {
template<class T>
void LoadActiveCode(const T &Code, uptr *EntryPoint) {
if (ActiveCode == nullptr) {
ActiveCode =
(u8*)::VirtualAlloc(nullptr, ActiveCodeLength, MEM_COMMIT | MEM_RESERVE,
PAGE_EXECUTE_READWRITE);
if (ActiveCode == nullptr) return false;
ASSERT_NE(ActiveCode, nullptr);
}
size_t Position = 0;
*EntryPoint = (uptr)&ActiveCode[0];
// Copy the function body.
for (size_t i = 0; i < CodeLength; ++i)
ActiveCode[Position++] = Code[i];
return true;
for (size_t i = 0; i < sizeof(T); ++i)
ActiveCode[Position++] = Code[i];
}
int InterceptorFunctionCalled;
@@ -96,10 +154,21 @@ TEST(Interception, InternalGetProcAddress) {
EXPECT_NE(DbgPrint_adddress, isdigit_address);
}
void TestIdentityFunctionPatching(u8* IdentityCode, size_t IdentityCodeLength) {
template<class T>
bool TestFunctionPatching(const T &Code) {
uptr Address;
LoadActiveCode<T>(Code, &Address);
uptr UnusedRealAddress = 0;
return OverrideFunction(Address, (uptr)&InterceptorFunction,
&UnusedRealAddress);
}
template<class T>
void TestIdentityFunctionPatching(const T &IdentityCode) {
uptr IdentityAddress;
ASSERT_TRUE(
LoadActiveCode(IdentityCode, IdentityCodeLength, &IdentityAddress));
LoadActiveCode<T>(IdentityCode, &IdentityAddress);
IdentityFunction Identity = (IdentityFunction)IdentityAddress;
// Validate behavior before dynamic patching.
@@ -129,10 +198,22 @@ void TestIdentityFunctionPatching(u8* IdentityCode, size_t IdentityCodeLength) {
#if !SANITIZER_WINDOWS64
TEST(Interception, OverrideFunction) {
TestIdentityFunctionPatching(kIdentityCodeWithPrologue,
sizeof(kIdentityCodeWithPrologue));
TestIdentityFunctionPatching(kIdentityCodeWithPushPop,
sizeof(kIdentityCodeWithPushPop));
TestIdentityFunctionPatching(kIdentityCodeWithPrologue);
TestIdentityFunctionPatching(kIdentityCodeWithPushPop);
}
TEST(Interception, PatchableFunction) {
EXPECT_TRUE(TestFunctionPatching(kPatchableCode1));
EXPECT_TRUE(TestFunctionPatching(kPatchableCode2));
EXPECT_TRUE(TestFunctionPatching(kPatchableCode3));
EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode1));
EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode2));
EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode3));
EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode4));
EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode5));
EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode6));
EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode7));
}
#endif