scudo: Fix various test failures, mostly on 32-bit.
Differential Revision: https://reviews.llvm.org/D74429
parent 9df0c264d4
commit 87303fd917
compiler-rt/lib/scudo/standalone/size_class_map.h

@@ -24,7 +24,6 @@ inline uptr scaledLog2(uptr Size, uptr ZeroLog, uptr LogBits) {
 
 template <typename Config> struct SizeClassMapBase {
   static u32 getMaxCachedHint(uptr Size) {
-    DCHECK_LE(Size, (1UL << Config::MaxSizeLog) + Chunk::getHeaderSize());
     DCHECK_NE(Size, 0);
     u32 N;
     // Force a 32-bit division if the template parameters allow for it.
@@ -95,10 +94,17 @@ public:
       return (Size + MinSize - 1) >> Config::MinSizeLog;
     return MidClass + 1 + scaledLog2(Size - 1, Config::MidSizeLog, S);
   }
+
+  static u32 getMaxCachedHint(uptr Size) {
+    DCHECK_LE(Size, MaxSize);
+    return Base::getMaxCachedHint(Size);
+  }
 };
 
 template <typename Config>
 class TableSizeClassMap : public SizeClassMapBase<Config> {
+  typedef SizeClassMapBase<Config> Base;
+
   static const u8 S = Config::NumBits - 1;
   static const uptr M = (1UL << S) - 1;
   static const uptr ClassesSize =
@@ -156,8 +162,10 @@ public:
     return Table.Tab[scaledLog2(Size - 1, Config::MidSizeLog, S)];
   }
 
-  static void print() {}
-  static void validate() {}
+  static u32 getMaxCachedHint(uptr Size) {
+    DCHECK_LE(Size, MaxSize);
+    return Base::getMaxCachedHint(Size);
+  }
 };
 
 struct AndroidSizeClassConfig {
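The three size_class_map.h hunks above share one idea: only a concrete size class map knows its own MaxSize, so the bounds check moves out of SizeClassMapBase::getMaxCachedHint (which had to approximate the bound from Config::MaxSizeLog) and into each derived map, which asserts Size <= MaxSize and then delegates to the base implementation. Below is a minimal compilable sketch of that shape; DemoConfig, DemoSizeClassMap, and the hint heuristic are hypothetical stand-ins, not Scudo's real code.

#include <cassert>
#include <cstdint>

// Hypothetical config, mirroring the role Config plays in the diff.
struct DemoConfig {
  static const uint32_t MaxNumCachedHint = 8;
  static const uint64_t MaxSize = uint64_t(1) << 16;
};

// Base class: shared computation only, no bounds check, because only the
// derived map knows the correct upper bound for its size classes.
template <typename Config> struct SizeClassMapBase {
  static uint32_t getMaxCachedHint(uint64_t Size) {
    assert(Size != 0);
    // Placeholder heuristic: cache fewer blocks for larger sizes.
    const uint64_t N = (uint64_t(1) << 14) / Size;
    return N < Config::MaxNumCachedHint ? static_cast<uint32_t>(N)
                                        : Config::MaxNumCachedHint;
  }
};

// Derived map: re-assert the precondition against its own MaxSize, then
// delegate -- the same shape the hunks above give each Scudo map.
template <typename Config>
struct DemoSizeClassMap : SizeClassMapBase<Config> {
  typedef SizeClassMapBase<Config> Base;
  static const uint64_t MaxSize = Config::MaxSize;

  static uint32_t getMaxCachedHint(uint64_t Size) {
    assert(Size <= MaxSize);
    return Base::getMaxCachedHint(Size);
  }
};

int main() {
  // (1 << 14) / 4096 == 4, which is below MaxNumCachedHint.
  return DemoSizeClassMap<DemoConfig>::getMaxCachedHint(4096) == 4 ? 0 : 1;
}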
compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp

@@ -268,10 +268,26 @@ TEST(ScudoWrappersCTest, MallocIterateBoundary) {
   const size_t BlockDelta = FIRST_32_SECOND_64(8U, 16U);
   const size_t SpecialSize = PageSize - BlockDelta;
 
-  void *P = malloc(SpecialSize);
-  EXPECT_NE(P, nullptr);
-  BoundaryP = reinterpret_cast<uintptr_t>(P);
-  const uintptr_t Block = BoundaryP - BlockDelta;
+  // We aren't guaranteed that any size class is exactly a page wide. So we need
+  // to keep making allocations until we succeed.
+  //
+  // With a 16-byte block alignment and 4096-byte page size, each allocation has
+  // a probability of (1 - (16/4096)) of failing to meet the alignment
+  // requirements, and the probability of failing 65536 times is
+  // (1 - (16/4096))^65536 < 10^-112. So if we still haven't succeeded after
+  // 65536 tries, give up.
+  uintptr_t Block;
+  void *P = nullptr;
+  for (unsigned I = 0; I != 65536; ++I) {
+    void *PrevP = P;
+    P = malloc(SpecialSize);
+    EXPECT_NE(P, nullptr);
+    *reinterpret_cast<void **>(P) = PrevP;
+    BoundaryP = reinterpret_cast<uintptr_t>(P);
+    Block = BoundaryP - BlockDelta;
+    if ((Block & (PageSize - 1)) == 0U)
+      break;
+  }
   EXPECT_EQ((Block & (PageSize - 1)), 0U);
 
   Count = 0U;
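A quick numeric check of the bound quoted in the comment added above, as a standalone sketch that is not part of the commit: each attempt lands page-aligned with probability 16/4096 = 1/256, so all 65536 attempts fail with probability (255/256)^65536 = 10^(65536 * log10(255/256)), about 10^-111.4, which is vanishingly small.

#include <cmath>
#include <cstdio>

int main() {
  // Probability that a single allocation fails to land page-aligned.
  const double FailProb = 1.0 - 16.0 / 4096.0; // 255/256
  // log10 of the probability that all 65536 attempts fail.
  const double Log10All = 65536.0 * std::log10(FailProb);
  std::printf("(255/256)^65536 ~= 10^%.1f\n", Log10All); // prints ~ -111.4
  return 0;
}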
@@ -281,7 +297,11 @@ TEST(ScudoWrappersCTest, MallocIterateBoundary) {
   malloc_enable();
   EXPECT_EQ(Count, 1U);
 
-  free(P);
+  while (P) {
+    void *NextP = *reinterpret_cast<void **>(P);
+    free(P);
+    P = NextP;
+  }
 }
 
 // We expect heap operations within a disable/enable scope to deadlock.
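The new teardown works because the retry loop added in the earlier hunk threads each allocation onto an intrusive singly linked list: the first word of every block stores the pointer to the previous block, so a single walk frees the whole chain. A minimal standalone sketch of that pattern (the block count and the 64-byte size are arbitrary demo values, not from the commit):

#include <cstdlib>

int main() {
  // Build the chain: the first word of each block points at the previous one.
  void *P = nullptr;
  for (int I = 0; I != 8; ++I) {
    void *NewP = std::malloc(64); // arbitrary demo size
    if (!NewP)
      break;
    *reinterpret_cast<void **>(NewP) = P; // thread onto the list
    P = NewP;
  }
  // Tear down: one walk frees every block, same shape as the hunk above.
  while (P) {
    void *NextP = *reinterpret_cast<void **>(P);
    std::free(P);
    P = NextP;
  }
  return 0;
}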