 }
-HeapObject* PagedSpace::EnsureSweepingProgress(intptr_t size_in_bytes) {
+HeapObject* PagedSpace::EnsureSweepingProgress(int size_in_bytes) {
+  ASSERT(size_in_bytes >= 0);
   MarkCompactCollector* collector = heap()->mark_compact_collector();
   if (collector->IsConcurrentSweepingInProgress(this)) {
     // If sweeping is still in progress, try to sweep pages on the main thread.
     intptr_t free_chunk =
         collector->SweepInParallel(this, size_in_bytes);
-    if (free_chunk >= size_in_bytes) {
+    if (free_chunk >= static_cast<intptr_t>(size_in_bytes)) {
       HeapObject* object = free_list_.Allocate(size_in_bytes);
       // We should be able to allocate an object here since we just freed that
       // much memory.

   // If sweeping is still in progress, try to sweep unswept pages. If that is
   // not successful, wait for the sweeper threads and re-try free-list
   // allocation.
-  MUST_USE_RESULT HeapObject* EnsureSweepingProgress(intptr_t size_in_bytes);
+  MUST_USE_RESULT HeapObject* EnsureSweepingProgress(int size_in_bytes);

   // Slow path of AllocateRaw. This function is space-dependent.
   MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
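For readers without the surrounding V8 sources, the retry contract in the header comment can be sketched in isolation. The following is a minimal standalone sketch, not V8's implementation: FreeList and Sweeper here are hypothetical stand-ins with made-up bookkeeping, and only the control flow mirrors EnsureSweepingProgress as it appears in the diff above.

// Minimal stand-in types; V8's real FreeList and MarkCompactCollector are
// far more involved. This only models the sweep-then-retry control flow.
#include <stdint.h>

struct FreeList {
  intptr_t available_bytes = 0;

  // Returns a non-null dummy pointer on success, nullptr on failure.
  void* Allocate(int size_in_bytes) {
    if (available_bytes >= size_in_bytes) {
      available_bytes -= size_in_bytes;
      return &available_bytes;
    }
    return nullptr;
  }
};

struct Sweeper {
  bool sweeping_in_progress = true;
  intptr_t unswept_bytes = 0;

  // Sweeps on the calling thread; returns how many bytes were freed.
  intptr_t SweepInParallel(FreeList* free_list, int size_in_bytes) {
    intptr_t freed =
        unswept_bytes < size_in_bytes ? unswept_bytes : size_in_bytes;
    unswept_bytes -= freed;
    free_list->available_bytes += freed;
    return freed;
  }

  // Blocks until the sweeper threads finish; all remaining unswept memory
  // ends up on the free list.
  void WaitUntilSweepingCompleted(FreeList* free_list) {
    free_list->available_bytes += unswept_bytes;
    unswept_bytes = 0;
    sweeping_in_progress = false;
  }
};

// Mirrors the branching of EnsureSweepingProgress: make progress on the
// main thread first, and only block on the sweeper threads if that did not
// free enough memory for this request.
void* EnsureSweepingProgress(FreeList* free_list, Sweeper* sweeper,
                             int size_in_bytes) {
  if (sweeper->sweeping_in_progress) {
    intptr_t free_chunk = sweeper->SweepInParallel(free_list, size_in_bytes);
    if (free_chunk >= static_cast<intptr_t>(size_in_bytes)) {
      // We just freed at least size_in_bytes, so this allocation should
      // succeed.
      void* object = free_list->Allocate(size_in_bytes);
      if (object != nullptr) return object;
    }
    // Sweeping on the main thread was not enough: wait for the sweeper
    // threads, then re-try free-list allocation.
    sweeper->WaitUntilSweepingCompleted(free_list);
    return free_list->Allocate(size_in_bytes);
  }
  return nullptr;
}

The point of the two-step shape is that the allocating thread only blocks in WaitUntilSweepingCompleted as a last resort; when SweepInParallel frees enough on the main thread, the request is satisfied without synchronizing with the sweeper threads at all.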