using SizeClassMap = AndroidSizeClassMap;
#if SCUDO_CAN_USE_PRIMARY64
// 256MB regions
- typedef SizeClassAllocator64<SizeClassMap, 28U,
+ typedef SizeClassAllocator64<SizeClassMap, 28U,
+ /*MinReleaseToOsIntervalMs=*/1000, /*MaxReleaseToOsIntervalMs=*/1000,
/*MaySupportMemoryTagging=*/true>
Primary;
#else
// 256KB regions
- typedef SizeClassAllocator32<SizeClassMap, 18U> Primary;
+ typedef SizeClassAllocator32<SizeClassMap, 18U,
+ /*MinReleaseToOsIntervalMs=*/1000, /*MaxReleaseToOsIntervalMs=*/1000> Primary;
#endif
// Cache blocks up to 2MB
- typedef MapAllocator<MapAllocatorCache<32U, 2UL << 20>> Secondary;
+ typedef MapAllocator<MapAllocatorCache<32U, 2UL << 20,
+ /*MinReleaseToOsIntervalMs=*/0, /*MaxReleaseToOsIntervalMs=*/1000>> Secondary;
template <class A>
using TSDRegistryT = TSDRegistrySharedT<A, 2U>; // Shared, max 2 TSDs.
};
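A note on the two new interval arguments: they are the clamp bounds consumed by the `setReleaseToOsIntervalMs()` setters added further down. With both primary bounds pinned to 1000, the Android primary always ends up on a one-second release cadence regardless of what a caller requests, while the secondary keeps a 0..1000ms range. A minimal sketch of the resulting behavior, assuming the enclosing Android config is instantiated as an allocator `A` (names illustrative, not part of the patch):

  A.setOption(scudo::Option::ReleaseInterval, INT32_MIN); // Primary clamps up to 1000ms
  A.setOption(scudo::Option::ReleaseInterval, 60000);     // Primary clamps down to 1000ms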
using SizeClassMap = SvelteSizeClassMap;
#if SCUDO_CAN_USE_PRIMARY64
// 128MB regions
- typedef SizeClassAllocator64<SizeClassMap, 27U> Primary;
+ typedef SizeClassAllocator64<SizeClassMap, 27U,
+ /*MinReleaseToOsIntervalMs=*/1000, /*MaxReleaseToOsIntervalMs=*/1000> Primary;
#else
// 64KB regions
- typedef SizeClassAllocator32<SizeClassMap, 16U> Primary;
+ typedef SizeClassAllocator32<SizeClassMap, 16U,
+ /*MinReleaseToOsIntervalMs=*/1000, /*MaxReleaseToOsIntervalMs=*/1000> Primary;
#endif
- typedef MapAllocator<MapAllocatorCache<4U, 1UL << 18>> Secondary;
+ typedef MapAllocator<MapAllocatorCache<4U, 1UL << 18,
+ /*MinReleaseToOsIntervalMs=*/0, /*MaxReleaseToOsIntervalMs=*/0>> Secondary;
template <class A>
using TSDRegistryT = TSDRegistrySharedT<A, 1U>; // Shared, only 1 TSD.
};
namespace scudo {
+enum class Option { ReleaseInterval };
+
template <class Params, void (*PostInitCallback)(void) = EmptyCallback>
class Allocator {
public:
return Options.MayReturnNull;
}
- // TODO(kostyak): implement this as a "backend" to mallopt.
- bool setOption(UNUSED uptr Option, UNUSED uptr Value) { return false; }
+ bool setOption(Option O, sptr Value) {
+ if (O == Option::ReleaseInterval) {
+ Primary.setReleaseToOsIntervalMs(static_cast<s32>(Value));
+ Secondary.setReleaseToOsIntervalMs(static_cast<s32>(Value));
+ return true;
+ }
+ return false;
+ }
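This replaces the old `uptr`-typed stub and gives `mallopt` (last hunk below) a real backend. A hedged usage sketch, with `A` again standing in for a hypothetical allocator instance:

  // Returns true only for recognized options; anything else keeps the old
  // "return false" contract.
  const bool Known = A.setOption(scudo::Option::ReleaseInterval, 100);
  // Known == true; Primary and Secondary each clamp 100 into their own
  // [MinReleaseToOsIntervalMs, MaxReleaseToOsIntervalMs] before storing it.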
// Return the usable size for a given chunk. Technically we lie, as we just
// report the actual size of a chunk. This is done to counteract code actively
"returning NULL in otherwise non-fatal error scenarios, eg: OOM, "
"invalid allocation alignments, etc.")
-SCUDO_FLAG(int, release_to_os_interval_ms, SCUDO_ANDROID ? 1000 : 5000,
+SCUDO_FLAG(int, release_to_os_interval_ms, SCUDO_ANDROID ? INT32_MIN : 5000,
"Interval (in milliseconds) at which to attempt release of unused "
"memory to the OS. Negative values disable the feature.")
// Memory used by this allocator is never unmapped but can be partially
// reclaimed if the platform allows for it.
-template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
+template <class SizeClassMapT, uptr RegionSizeLog,
+ s32 MinReleaseToOsIntervalMs = INT32_MIN,
+ s32 MaxReleaseToOsIntervalMs = INT32_MAX>
+class SizeClassAllocator32 {
public:
typedef SizeClassMapT SizeClassMap;
// The bytemap can only track UINT8_MAX - 1 classes.
static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), "");
// Regions should be large enough to hold the largest Block.
static_assert((1UL << RegionSizeLog) >= SizeClassMap::MaxSize, "");
- typedef SizeClassAllocator32<SizeClassMapT, RegionSizeLog> ThisT;
+ typedef SizeClassAllocator32<SizeClassMapT, RegionSizeLog,
+ MinReleaseToOsIntervalMs,
+ MaxReleaseToOsIntervalMs> ThisT;
typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
typedef typename CacheT::TransferBatch TransferBatch;
static const bool SupportsMemoryTagging = false;
Sci->CanRelease = (I != SizeClassMap::BatchClassId) &&
(getSizeByClassId(I) >= (PageSize / 32));
}
- ReleaseToOsIntervalMs = ReleaseToOsInterval;
+ setReleaseToOsIntervalMs(ReleaseToOsInterval);
}
void init(s32 ReleaseToOsInterval) {
memset(this, 0, sizeof(*this));
getStats(Str, I, 0);
}
+ void setReleaseToOsIntervalMs(s32 Interval) {
+ if (Interval >= MaxReleaseToOsIntervalMs) {
+ Interval = MaxReleaseToOsIntervalMs;
+ } else if (Interval <= MinReleaseToOsIntervalMs) {
+ Interval = MinReleaseToOsIntervalMs;
+ }
+ atomic_store(&ReleaseToOsIntervalMs, Interval, memory_order_relaxed);
+ }
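The `atomic_s32` member change below is what makes this setter safe to call after init: `setOption()` can race with `releaseToOSMaybe()` on the deallocation path. Relaxed ordering suffices because the interval is purely advisory; a sketch of the benign race (illustrative):

  // T1: setReleaseToOsIntervalMs(0);              // relaxed store
  // T2: IntervalMs = getReleaseToOsIntervalMs();  // relaxed load
  // T2 may observe either the old or the new interval; both lead to a valid
  // release decision, just one pass earlier or later.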
+
uptr releaseToOS() {
uptr TotalReleasedBytes = 0;
for (uptr I = 0; I < NumClasses; I++) {
AvailableChunks, Rss >> 10, Sci->ReleaseInfo.RangesReleased);
}
+ s32 getReleaseToOsIntervalMs() {
+ return atomic_load(&ReleaseToOsIntervalMs, memory_order_relaxed);
+ }
+
NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
bool Force = false) {
const uptr BlockSize = getSizeByClassId(ClassId);
}
if (!Force) {
- const s32 IntervalMs = ReleaseToOsIntervalMs;
+ const s32 IntervalMs = getReleaseToOsIntervalMs();
if (IntervalMs < 0)
return 0;
if (Sci->ReleaseInfo.LastReleaseAtNs +
// through the whole NumRegions.
uptr MinRegionIndex;
uptr MaxRegionIndex;
- s32 ReleaseToOsIntervalMs;
+ atomic_s32 ReleaseToOsIntervalMs; // Set via setOption(); read on release paths.
// Unless several threads request regions simultaneously from different size
// classes, the stash rarely contains more than 1 entry.
static constexpr uptr MaxStashedRegions = 4;
// released if the platform allows for it.
template <class SizeClassMapT, uptr RegionSizeLog,
+ s32 MinReleaseToOsIntervalMs = INT32_MIN,
+ s32 MaxReleaseToOsIntervalMs = INT32_MAX,
bool MaySupportMemoryTagging = false>
class SizeClassAllocator64 {
public:
typedef SizeClassMapT SizeClassMap;
typedef SizeClassAllocator64<SizeClassMap, RegionSizeLog,
+ MinReleaseToOsIntervalMs,
+ MaxReleaseToOsIntervalMs,
MaySupportMemoryTagging>
ThisT;
typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
(getSizeByClassId(I) >= (PageSize / 32));
Region->RandState = getRandomU32(&Seed);
}
- ReleaseToOsIntervalMs = ReleaseToOsInterval;
+ setReleaseToOsIntervalMs(ReleaseToOsInterval);
if (SupportsMemoryTagging)
UseMemoryTagging = systemSupportsMemoryTagging();
getStats(Str, I, 0);
}
+ void setReleaseToOsIntervalMs(s32 Interval) {
+ if (Interval >= MaxReleaseToOsIntervalMs) {
+ Interval = MaxReleaseToOsIntervalMs;
+ } else if (Interval <= MinReleaseToOsIntervalMs) {
+ Interval = MinReleaseToOsIntervalMs;
+ }
+ atomic_store(&ReleaseToOsIntervalMs, Interval, memory_order_relaxed);
+ }
+
uptr releaseToOS() {
uptr TotalReleasedBytes = 0;
for (uptr I = 0; I < NumClasses; I++) {
uptr PrimaryBase;
RegionInfo *RegionInfoArray;
MapPlatformData Data;
- s32 ReleaseToOsIntervalMs;
+ atomic_s32 ReleaseToOsIntervalMs;
bool UseMemoryTagging;
RegionInfo *getRegionInfo(uptr ClassId) const {
getRegionBaseByClassId(ClassId));
}
+ s32 getReleaseToOsIntervalMs() {
+ return atomic_load(&ReleaseToOsIntervalMs, memory_order_relaxed);
+ }
+
NOINLINE uptr releaseToOSMaybe(RegionInfo *Region, uptr ClassId,
bool Force = false) {
const uptr BlockSize = getSizeByClassId(ClassId);
}
if (!Force) {
- const s32 IntervalMs = ReleaseToOsIntervalMs;
+ const s32 IntervalMs = getReleaseToOsIntervalMs();
if (IntervalMs < 0)
return 0;
if (Region->ReleaseInfo.LastReleaseAtNs +
void releaseToOS() {}
};
-template <uptr MaxEntriesCount = 32U, uptr MaxEntrySize = 1UL << 19>
+template <uptr MaxEntriesCount = 32U, uptr MaxEntrySize = 1UL << 19,
+ s32 MinReleaseToOsIntervalMs = INT32_MIN,
+ s32 MaxReleaseToOsIntervalMs = INT32_MAX>
class MapAllocatorCache {
public:
// Fuchsia doesn't allow releasing Secondary blocks yet. Note that 0 length
static_assert(!SCUDO_FUCHSIA || MaxEntriesCount == 0U, "");
void initLinkerInitialized(s32 ReleaseToOsInterval) {
- ReleaseToOsIntervalMs = ReleaseToOsInterval;
+ setReleaseToOsIntervalMs(ReleaseToOsInterval);
}
void init(s32 ReleaseToOsInterval) {
memset(this, 0, sizeof(*this));
}
}
}
+ s32 Interval;
if (EmptyCache)
empty();
- else if (ReleaseToOsIntervalMs >= 0)
- releaseOlderThan(Time -
- static_cast<u64>(ReleaseToOsIntervalMs) * 1000000);
+ else if ((Interval = getReleaseToOsIntervalMs()) >= 0)
+ releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
return EntryCached;
}
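`releaseOlderThan()` compares against the nanosecond timestamps produced by `getMonotonicTime()`, while the interval is in milliseconds, hence the `* 1000000`. For example:

  // Interval = 1000 (ms): releaseOlderThan(Time - 1000000000) releases any
  // cached mapping that has sat unused for more than one second.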
return MaxEntriesCount != 0U && Size <= MaxEntrySize;
}
+ void setReleaseToOsIntervalMs(s32 Interval) {
+ if (Interval >= MaxReleaseToOsIntervalMs) {
+ Interval = MaxReleaseToOsIntervalMs;
+ } else if (Interval <= MinReleaseToOsIntervalMs) {
+ Interval = MinReleaseToOsIntervalMs;
+ }
+ atomic_store(&ReleaseToOsIntervalMs, Interval, memory_order_relaxed);
+ }
+
void releaseToOS() { releaseOlderThan(UINT64_MAX); }
void disable() { Mutex.lock(); }
}
}
+ s32 getReleaseToOsIntervalMs() {
+ return atomic_load(&ReleaseToOsIntervalMs, memory_order_relaxed);
+ }
+
struct CachedBlock {
uptr Block;
uptr BlockEnd;
u32 EntriesCount;
uptr LargestSize;
u32 IsFullEvents;
- s32 ReleaseToOsIntervalMs;
+ atomic_s32 ReleaseToOsIntervalMs;
};
template <class CacheT> class MapAllocator {
static uptr canCache(uptr Size) { return CacheT::canCache(Size); }
+ void setReleaseToOsIntervalMs(s32 Interval) {
+ Cache.setReleaseToOsIntervalMs(Interval);
+ }
+
void releaseToOS() { Cache.releaseToOS(); }
private:
INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, UNUSED int value) {
if (param == M_DECAY_TIME) {
- // TODO(kostyak): set release_to_os_interval_ms accordingly.
+ if (SCUDO_ANDROID) {
+ if (value == 0) {
+ // Will set the release values to their minimum values.
+ value = INT32_MIN;
+ } else {
+ // Will set the release values to their maximum values.
+ value = INT32_MAX;
+ }
+ }
+
+ SCUDO_ALLOCATOR.setOption(scudo::Option::ReleaseInterval,
+ static_cast<scudo::sptr>(value));
return 1;
} else if (param == M_PURGE) {
SCUDO_ALLOCATOR.releaseToOS();
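Taken together, the Android `mallopt` contract becomes: `M_DECAY_TIME` selects each config's minimum or maximum release interval, and `M_PURGE` remains an immediate release. A usage sketch (illustrative; `mallopt`, `M_DECAY_TIME`, and `M_PURGE` come from Bionic's `<malloc.h>`):

  mallopt(M_DECAY_TIME, 0); // -> INT32_MIN, clamped up to each config's minimum
  mallopt(M_DECAY_TIME, 1); // -> INT32_MAX, e.g. 1000ms for the Android primary
  mallopt(M_PURGE, 0);      // release unused memory to the OS immediately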