int MarkCompactCollector::SweepInParallel(PagedSpace* space,
-                                          int required_freed_bytes) {
+                                          intptr_t required_freed_bytes) {
  PageIterator it(space);
  FreeList* free_list = space == heap()->old_pointer_space()
                            ? free_list_old_pointer_space_.get()
                            : free_list_old_data_space_.get();
  FreeList private_free_list(space);
-  int max_freed = 0;
-  int max_freed_overall = 0;
+  intptr_t max_freed = 0;
+  intptr_t max_freed_overall = 0;
  while (it.has_next()) {
    Page* p = it.next();
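
For readers wondering what the wider type buys: on a 64-bit build intptr_t is wider than int, so a byte count that originates as an intptr_t no longer has to be narrowed on its way into required_freed_bytes or the max_freed accumulators. A minimal standalone sketch of that narrowing (hypothetical helper names, not part of this patch; a 64-bit build is assumed):

#include <cinttypes>
#include <cstdint>
#include <cstdio>

// Hypothetical helpers used only for this illustration.
static int TakesInt(int bytes) { return bytes; }
static intptr_t TakesIntptr(intptr_t bytes) { return bytes; }

int main() {
  // A byte count that fits in intptr_t on a 64-bit target but not in int.
  intptr_t bytes = static_cast<intptr_t>(INT64_C(1) << 31) + 64;

  // Narrowing to int does not preserve the value.
  std::printf("as int:      %d\n", TakesInt(static_cast<int>(bytes)));
  // Keeping intptr_t end to end does.
  std::printf("as intptr_t: %" PRIdPTR "\n", TakesIntptr(bytes));
  return 0;
}
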
  // to a value larger than 0, then sweeping returns after a block of at least
  // required_freed_bytes was freed. If required_freed_bytes was set to zero,
  // then the whole given space is swept.
-  int SweepInParallel(PagedSpace* space, int required_freed_bytes);
+  int SweepInParallel(PagedSpace* space, intptr_t required_freed_bytes);
  void WaitUntilSweepingCompleted();
}
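
The comment above spells out the contract of the widened parameter. The sketch below is a hypothetical caller (not code from this patch, and it assumes the V8-internal MarkCompactCollector and PagedSpace headers); the free_chunk comparison mirrors the one in EnsureSweepingProgress further down.

// Hypothetical caller illustrating the documented required_freed_bytes modes.
void SweepInParallelExamples(MarkCompactCollector* collector,
                             PagedSpace* space, intptr_t size_in_bytes) {
  // required_freed_bytes > 0: sweeping may return as soon as a block of at
  // least size_in_bytes has been freed; the caller compares the result
  // against the request, as EnsureSweepingProgress does below.
  intptr_t free_chunk = collector->SweepInParallel(space, size_in_bytes);
  if (free_chunk >= size_in_bytes) {
    // A large enough contiguous block is now available on the free list.
  }

  // required_freed_bytes == 0: the whole given space is swept.
  collector->SweepInParallel(space, 0);
}
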
-HeapObject* PagedSpace::EnsureSweepingProgress(
-    int size_in_bytes) {
+HeapObject* PagedSpace::EnsureSweepingProgress(intptr_t size_in_bytes) {
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  if (collector->IsConcurrentSweepingInProgress(this)) {
    // If sweeping is still in progress, try to sweep pages on the main thread.
-    int free_chunk =
+    intptr_t free_chunk =
        collector->SweepInParallel(this, size_in_bytes);
    if (free_chunk >= size_in_bytes) {
      HeapObject* object = free_list_.Allocate(size_in_bytes);
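
The hunk stops at the successful free-list allocation. Going by the header comment in the next hunk ("wait for the sweeper threads and re-try free-list allocation"), the remainder of the slow path plausibly has the shape sketched below; the control flow beyond the shown lines is an assumption, and the method name is deliberately changed to mark it as a sketch rather than the patch's exact body.

// Illustrative shape of the rest of the slow path, using only members that
// appear in this patch (SweepInParallel, free_list_.Allocate,
// IsConcurrentSweepingInProgress, WaitUntilSweepingCompleted).
HeapObject* PagedSpace::EnsureSweepingProgressSketch(intptr_t size_in_bytes) {
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  if (collector->IsConcurrentSweepingInProgress(this)) {
    // First try to free enough memory on the main thread.
    intptr_t free_chunk = collector->SweepInParallel(this, size_in_bytes);
    if (free_chunk >= size_in_bytes) {
      HeapObject* object = free_list_.Allocate(size_in_bytes);
      if (object != NULL) return object;
    }
    // Otherwise block until concurrent sweeping has finished and re-try the
    // free-list allocation with whatever the sweeper threads released.
    collector->WaitUntilSweepingCompleted();
    return free_list_.Allocate(size_in_bytes);
  }
  return NULL;
}
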
  // If sweeping is still in progress, try to sweep unswept pages. If that is
  // not successful, wait for the sweeper threads and re-try free-list
  // allocation.
-  MUST_USE_RESULT HeapObject* EnsureSweepingProgress(int size_in_bytes);
+  MUST_USE_RESULT HeapObject* EnsureSweepingProgress(intptr_t size_in_bytes);

  // Slow path of AllocateRaw. This function is space-dependent.
  MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);