forked from OSchip/llvm-project
[scudo][standalone] Allow Primary allocations to fail up multiple times.
Summary: When enabling some malloc debug features on Android, multiple 32 bit regions become exhausted, and the allocations fail. Allow allocations to keep trying each bigger class in the Primary until it finds a fit. In addition, some Android tests running on 32 bit sometimes fail due to running out of space in two regions, causing the allocation to fail. Reviewers: cryptoad Reviewed By: cryptoad Subscribers: #sanitizers, llvm-commits Tags: #sanitizers Differential Revision: https://reviews.llvm.org/D82070
This commit is contained in:
parent
2defe55722
commit
e7ac984dc0
|
@ -297,17 +297,21 @@ public:
|
|||
bool UnlockRequired;
|
||||
auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
|
||||
Block = TSD->Cache.allocate(ClassId);
|
||||
// If the allocation failed, the most likely reason with a 64-bit primary
|
||||
// is the region being full. In that event, retry once using the
|
||||
// immediately larger class (except if the failing class was already the
|
||||
// largest). This will waste some memory but will allow the application to
|
||||
// not fail. If dealing with the largest class, fallback to the Secondary.
|
||||
// If the allocation failed, the most likely reason with a 32-bit primary
|
||||
// is the region being full. In that event, retry in each successively
|
||||
// larger class until it fits. If it fails to fit in the largest class,
|
||||
// fallback to the Secondary.
|
||||
if (UNLIKELY(!Block)) {
|
||||
if (ClassId < SizeClassMap::LargestClassId)
|
||||
while (ClassId < SizeClassMap::LargestClassId) {
|
||||
Block = TSD->Cache.allocate(++ClassId);
|
||||
else
|
||||
if (LIKELY(Block)) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (UNLIKELY(!Block)) {
|
||||
ClassId = 0;
|
||||
}
|
||||
}
|
||||
if (UnlockRequired)
|
||||
TSD->unlock();
|
||||
}
|
||||
|
|
|
@ -385,14 +385,14 @@ struct DeathSizeClassConfig {
|
|||
static const scudo::uptr NumBits = 1;
|
||||
static const scudo::uptr MinSizeLog = 10;
|
||||
static const scudo::uptr MidSizeLog = 10;
|
||||
static const scudo::uptr MaxSizeLog = 11;
|
||||
static const scudo::uptr MaxSizeLog = 13;
|
||||
static const scudo::u32 MaxNumCachedHint = 4;
|
||||
static const scudo::uptr MaxBytesCachedLog = 12;
|
||||
};
|
||||
|
||||
static const scudo::uptr DeathRegionSizeLog = 20U;
|
||||
struct DeathConfig {
|
||||
// Tiny allocator, its Primary only serves chunks of two sizes.
|
||||
// Tiny allocator, its Primary only serves chunks of four sizes.
|
||||
using DeathSizeClassMap = scudo::FixedSizeClassMap<DeathSizeClassConfig>;
|
||||
typedef scudo::SizeClassAllocator64<DeathSizeClassMap, DeathRegionSizeLog>
|
||||
Primary;
|
||||
|
@ -472,7 +472,10 @@ TEST(ScudoCombinedTest, FullRegion) {
|
|||
ClassId <= DeathConfig::DeathSizeClassMap::LargestClassId; ClassId++) {
|
||||
const scudo::uptr Size =
|
||||
DeathConfig::DeathSizeClassMap::getSizeByClassId(ClassId);
|
||||
const scudo::uptr MaxNumberOfChunks = (1U << DeathRegionSizeLog) / Size;
|
||||
// Allocate enough to fill all of the regions above this one.
|
||||
const scudo::uptr MaxNumberOfChunks =
|
||||
((1U << DeathRegionSizeLog) / Size) *
|
||||
(DeathConfig::DeathSizeClassMap::LargestClassId - ClassId + 1);
|
||||
void *P;
|
||||
for (scudo::uptr I = 0; I <= MaxNumberOfChunks; I++) {
|
||||
P = Allocator->allocate(Size - 64U, Origin);
|
||||
|
@ -481,10 +484,10 @@ TEST(ScudoCombinedTest, FullRegion) {
|
|||
else
|
||||
V.push_back(P);
|
||||
}
|
||||
}
|
||||
while (!V.empty()) {
|
||||
Allocator->deallocate(V.back(), Origin);
|
||||
V.pop_back();
|
||||
}
|
||||
}
|
||||
EXPECT_EQ(FailedAllocationsCount, 0U);
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue