return ExternalReference(Heap::NewSpaceAllocationTopAddress());
}
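+// Exposes the address of Heap::always_allocate_scope_depth_ so that
+// generated code can enter and leave an AlwaysAllocateScope without
+// calling into C++.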
+ExternalReference ExternalReference::heap_always_allocate_scope_depth() {
+ return ExternalReference(Heap::always_allocate_scope_depth_address());
+}
+
ExternalReference ExternalReference::new_space_allocation_limit_address() {
return ExternalReference(Heap::NewSpaceAllocationLimitAddress());
}
// Static variable Heap::NewSpaceStart()
static ExternalReference new_space_start();
+ static ExternalReference heap_always_allocate_scope_depth();
// Used for fast allocation in generated code.
static ExternalReference new_space_allocation_top_address();
Label* throw_normal_exception,
Label* throw_out_of_memory_exception,
StackFrame::Type frame_type,
- bool do_gc) {
+ bool do_gc,
+ bool always_allocate) {
// r0: result parameter for PerformGC, if any
// r4: number of arguments including receiver (C callee-saved)
// r5: pointer to builtin function (C callee-saved)
__ Call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
}
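+  // If asked to always allocate, enter the equivalent of an
+  // AlwaysAllocateScope by incrementing the depth counter directly;
+  // the matching decrement follows the C call below.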
+ ExternalReference scope_depth =
+ ExternalReference::heap_always_allocate_scope_depth();
+ if (always_allocate) {
+ __ mov(r0, Operand(scope_depth));
+ __ ldr(r1, MemOperand(r0));
+ __ add(r1, r1, Operand(1));
+ __ str(r1, MemOperand(r0));
+ }
+
// Call C built-in.
// r0 = argc, r1 = argv
__ mov(r0, Operand(r4));
#else /* !defined(__arm__) */
__ mov(pc, Operand(r5));
#endif /* !defined(__arm__) */
- // result is in r0 or r0:r1 - do not destroy these registers!
+
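+  // If we entered an always-allocate scope above, leave it now.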
+ if (always_allocate) {
+    // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1,
+    // though; they contain the result.
+ __ mov(r2, Operand(scope_depth));
+ __ ldr(r3, MemOperand(r2));
+ __ sub(r3, r3, Operand(1));
+ __ str(r3, MemOperand(r2));
+ }
// Check for failure result.
Label failure_returned;
GenerateCore(masm, &throw_normal_exception,
&throw_out_of_memory_exception,
frame_type,
- FLAG_gc_greedy);
+ FLAG_gc_greedy,
+ false);
// Do space-specific GC and retry runtime call.
GenerateCore(masm,
&throw_normal_exception,
&throw_out_of_memory_exception,
frame_type,
- true);
+ true,
+ false);
// Do full GC and retry runtime call one final time.
Failure* failure = Failure::InternalError();
&throw_normal_exception,
&throw_out_of_memory_exception,
frame_type,
+ true,
true);
__ bind(&throw_out_of_memory_exception);
Label* throw_normal_exception,
Label* throw_out_of_memory_exception,
StackFrame::Type frame_type,
- bool do_gc) {
+ bool do_gc,
+ bool always_allocate_scope) {
// eax: result parameter for PerformGC, if any
// ebx: pointer to C function (C callee-saved)
// ebp: frame pointer (restored after C call)
__ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
}
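+  // If asked to always allocate, enter the equivalent of an
+  // AlwaysAllocateScope by incrementing the depth counter directly;
+  // the matching decrement follows the C call below.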
+ ExternalReference scope_depth =
+ ExternalReference::heap_always_allocate_scope_depth();
+ if (always_allocate_scope) {
+ __ inc(Operand::StaticVariable(scope_depth));
+ }
+
// Call C function.
__ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
__ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
__ call(Operand(ebx));
// Result is in eax or edx:eax - do not destroy these registers!
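+  // If we entered an always-allocate scope above, leave it now.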
+ if (always_allocate_scope) {
+ __ dec(Operand::StaticVariable(scope_depth));
+ }
+
// Check for failure result.
Label failure_returned;
ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
GenerateCore(masm, &throw_normal_exception,
&throw_out_of_memory_exception,
frame_type,
- FLAG_gc_greedy);
+ FLAG_gc_greedy,
+ false);
// Do space-specific GC and retry runtime call.
GenerateCore(masm,
&throw_normal_exception,
&throw_out_of_memory_exception,
frame_type,
- true);
+ true,
+ false);
// Do full GC and retry runtime call one final time.
Failure* failure = Failure::InternalError();
&throw_normal_exception,
&throw_out_of_memory_exception,
frame_type,
+ true,
true);
__ bind(&throw_out_of_memory_exception);
Label* throw_normal_exception,
Label* throw_out_of_memory_exception,
StackFrame::Type frame_type,
- bool do_gc);
+ bool do_gc,
+ bool always_allocate_scope);
void GenerateThrowTOS(MacroAssembler* masm);
void GenerateThrowOutOfMemory(MacroAssembler* masm);
Object* Heap::AllocateRaw(int size_in_bytes,
- AllocationSpace space) {
+ AllocationSpace space,
+ AllocationSpace retry_space) {
ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
+ ASSERT(space != NEW_SPACE ||
+ retry_space == OLD_POINTER_SPACE ||
+ retry_space == OLD_DATA_SPACE);
#ifdef DEBUG
if (FLAG_gc_interval >= 0 &&
!disallow_allocation_failure_ &&
Counters::objs_since_last_full.Increment();
Counters::objs_since_last_young.Increment();
#endif
+ Object* result;
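+  // Inside an AlwaysAllocateScope a failed new-space allocation falls
+  // through to retry_space below instead of returning the failure.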
if (NEW_SPACE == space) {
- return new_space_.AllocateRaw(size_in_bytes);
+ result = new_space_.AllocateRaw(size_in_bytes);
+ if (always_allocate() && result->IsFailure()) {
+ space = retry_space;
+ } else {
+ return result;
+ }
}
- Object* result;
if (OLD_POINTER_SPACE == space) {
result = old_pointer_space_->AllocateRaw(size_in_bytes);
} else if (OLD_DATA_SPACE == space) {
OldSpace* Heap::TargetSpace(HeapObject* object) {
+ InstanceType type = object->map()->instance_type();
+ AllocationSpace space = TargetSpaceId(type);
+ return (space == OLD_POINTER_SPACE)
+ ? old_pointer_space_
+ : old_data_space_;
+}
+
+
+AllocationSpace Heap::TargetSpaceId(InstanceType type) {
// Heap numbers and sequential strings are promoted to old data space, all
// other object types are promoted to old pointer space. We do not use
// object->IsHeapNumber() and object->IsSeqString() because we already
// know that object has the heap object tag.
- InstanceType type = object->map()->instance_type();
ASSERT((type != CODE_TYPE) && (type != MAP_TYPE));
bool has_pointers =
type != HEAP_NUMBER_TYPE &&
(type >= FIRST_NONSTRING_TYPE ||
- String::cast(object)->representation_tag() != kSeqStringTag);
- return has_pointers ? old_pointer_space_ : old_data_space_;
+ (type & kStringRepresentationMask) != kSeqStringTag);
+ return has_pointers ? OLD_POINTER_SPACE : OLD_DATA_SPACE;
}
#define GC_GREEDY_CHECK() \
ASSERT(!FLAG_gc_greedy || v8::internal::Heap::GarbageCollectionGreedyCheck())
-// Do not use the identifier __object__ in a call to this macro.
-//
-// Call the function FUNCTION_CALL. If it fails with a RetryAfterGC
-// failure, call the garbage collector and retry the function. If the
-// garbage collector cannot reclaim the required space or the second
-// call fails with a RetryAfterGC failure, fail with out of memory.
-// If there is any other failure, return a null handle. If either
-// call succeeds, return a handle to the functions return value.
-//
-// Note that this macro always returns or raises a fatal error.
-#define CALL_HEAP_FUNCTION(FUNCTION_CALL, TYPE) \
- do { \
- GC_GREEDY_CHECK(); \
- Object* __object__ = FUNCTION_CALL; \
- if (__object__->IsFailure()) { \
- if (__object__->IsRetryAfterGC()) { \
- if (!Heap::CollectGarbage( \
- Failure::cast(__object__)->requested(), \
- Failure::cast(__object__)->allocation_space())) { \
- /* TODO(1181417): Fix this. */ \
- v8::internal::V8::FatalProcessOutOfMemory("CALL_HEAP_FUNCTION"); \
- } \
- __object__ = FUNCTION_CALL; \
- if (__object__->IsFailure()) { \
- if (__object__->IsRetryAfterGC()) { \
- /* TODO(1181417): Fix this. */ \
- v8::internal::V8::FatalProcessOutOfMemory("CALL_HEAP_FUNCTION"); \
- } \
- return Handle<TYPE>(); \
- } \
- } else { \
- if (__object__->IsOutOfMemoryFailure()) { \
- v8::internal::V8::FatalProcessOutOfMemory("CALL_HEAP_FUNCTION"); \
- } \
- return Handle<TYPE>(); \
- } \
- } \
- return Handle<TYPE>(TYPE::cast(__object__)); \
+
+// Calls the FUNCTION_CALL function up to three times, collecting
+// garbage between the attempts, to guarantee that any allocations
+// performed during the call will succeed if there is enough memory.
+//
+// Warning: Do not use the identifiers __object__ or __scope__ in a
+// call to this macro.
+#define CALL_AND_RETRY(FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY) \
+ do { \
+ GC_GREEDY_CHECK(); \
+ Object* __object__ = FUNCTION_CALL; \
+ if (!__object__->IsFailure()) return RETURN_VALUE; \
+ if (__object__->IsOutOfMemoryFailure()) { \
+ v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_0"); \
+ } \
+ if (!__object__->IsRetryAfterGC()) return RETURN_EMPTY; \
+ if (!Heap::CollectGarbage( \
+ Failure::cast(__object__)->requested(), \
+ Failure::cast(__object__)->allocation_space())) { \
+ v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_1"); \
+ return RETURN_EMPTY; \
+ } \
+ __object__ = FUNCTION_CALL; \
+ if (!__object__->IsFailure()) return RETURN_VALUE; \
+ if (__object__->IsOutOfMemoryFailure()) { \
+ v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_2"); \
+ } \
+ if (!__object__->IsRetryAfterGC()) return RETURN_EMPTY; \
+ Counters::gc_last_resort_from_handles.Increment(); \
+ Heap::CollectAllGarbage(); \
+ { \
+ AlwaysAllocateScope __scope__; \
+ __object__ = FUNCTION_CALL; \
+ } \
+ if (!__object__->IsFailure()) return RETURN_VALUE; \
+ if (__object__->IsOutOfMemoryFailure()) { \
+ /* TODO(1181417): Fix this. */ \
+ v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_3"); \
+ } \
+ ASSERT(!__object__->IsRetryAfterGC()); \
+ return RETURN_EMPTY; \
} while (false)
-// Don't use the following names: __object__, __failure__.
-#define CALL_HEAP_FUNCTION_VOID(FUNCTION_CALL) \
- GC_GREEDY_CHECK(); \
- Object* __object__ = FUNCTION_CALL; \
- if (__object__->IsFailure()) { \
- if (__object__->IsRetryAfterGC()) { \
- Failure* __failure__ = Failure::cast(__object__); \
- if (!Heap::CollectGarbage(__failure__->requested(), \
- __failure__->allocation_space())) { \
- /* TODO(1181417): Fix this. */ \
- V8::FatalProcessOutOfMemory("Handles"); \
- } \
- __object__ = FUNCTION_CALL; \
- if (__object__->IsFailure()) { \
- if (__object__->IsRetryAfterGC()) { \
- /* TODO(1181417): Fix this. */ \
- V8::FatalProcessOutOfMemory("Handles"); \
- } \
- return; \
- } \
- } else { \
- if (__object__->IsOutOfMemoryFailure()) { \
- V8::FatalProcessOutOfMemory("Handles"); \
- } \
- UNREACHABLE(); \
- } \
- }
+#define CALL_HEAP_FUNCTION(FUNCTION_CALL, TYPE) \
+ CALL_AND_RETRY(FUNCTION_CALL, \
+ Handle<TYPE>(TYPE::cast(__object__)), \
+ Handle<TYPE>())
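+
+// Typical use (a hypothetical helper; any Heap allocator returning
+// Object* works the same way):
+//   Handle<FixedArray> NewFixedArray(int length) {
+//     CALL_HEAP_FUNCTION(Heap::AllocateFixedArray(length), FixedArray);
+//   }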
+
+
+#define CALL_HEAP_FUNCTION_VOID(FUNCTION_CALL) \
+ CALL_AND_RETRY(FUNCTION_CALL, , )
#ifdef DEBUG
int Heap::mc_count_ = 0;
int Heap::gc_count_ = 0;
+int Heap::always_allocate_scope_depth_ = 0;
+
#ifdef DEBUG
bool Heap::allocation_allowed_ = true;
// spaces.
STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- Object* result = AllocateRaw(HeapNumber::kSize, space);
+ Object* result = AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
HeapObject::cast(result)->set_map(heap_number_map());
Object* Heap::AllocateHeapNumber(double value) {
+  // Use the general version if we're forced to always allocate.
+ if (always_allocate()) return AllocateHeapNumber(value, NOT_TENURED);
// This version of AllocateHeapNumber is optimized for
// allocation in new space.
STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
AllocationSpace space =
size > MaxHeapObjectSize() ? LO_SPACE : NEW_SPACE;
- Object* result = AllocateRaw(size, space);
+ Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
Object* Heap::Allocate(Map* map, AllocationSpace space) {
ASSERT(gc_state_ == NOT_IN_GC);
ASSERT(map->instance_type() != MAP_TYPE);
- Object* result = AllocateRaw(map->instance_size(), space);
+ Object* result = AllocateRaw(map->instance_size(),
+ space,
+ TargetSpaceId(map->instance_type()));
if (result->IsFailure()) return result;
HeapObject::cast(result)->set_map(map);
return result;
// Make the clone.
Map* map = boilerplate->map();
int object_size = map->instance_size();
- Object* result = new_space_.AllocateRaw(object_size);
+ Object* result = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
if (result->IsFailure()) return result;
- ASSERT(Heap::InNewSpace(result));
- // Copy the content.
+  // Copy the content. The arguments boilerplate doesn't have any
+  // fields that point to new space, so it's safe to skip the write
+  // barrier here.
CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(result)->address()),
reinterpret_cast<Object**>(boilerplate->address()),
object_size);
// Set the two properties. With always_allocate the object may live in
// old space, so the callee store can no longer skip the write barrier;
// the length is a smi and still can.
JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index,
- callee,
- SKIP_WRITE_BARRIER);
+ callee);
JSObject::cast(result)->InObjectPropertyAtPut(arguments_length_index,
Smi::FromInt(length),
SKIP_WRITE_BARRIER);
// Make the clone.
Map* map = source->map();
int object_size = map->instance_size();
- Object* clone = new_space_.AllocateRaw(object_size);
- if (clone->IsFailure()) return clone;
- ASSERT(Heap::InNewSpace(clone));
-
- // Copy the content.
- CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(clone)->address()),
- reinterpret_cast<Object**>(source->address()),
- object_size);
+ Object* clone;
+
+ // If we're forced to always allocate, we use the general allocation
+  // functions, which may leave us with an object in old space.
+ if (always_allocate()) {
+ clone = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
+ if (clone->IsFailure()) return clone;
+ Address clone_address = HeapObject::cast(clone)->address();
+ CopyBlock(reinterpret_cast<Object**>(clone_address),
+ reinterpret_cast<Object**>(source->address()),
+ object_size);
+ // Update write barrier for all fields that lie beyond the header.
+ for (int offset = JSObject::kHeaderSize;
+ offset < object_size;
+ offset += kPointerSize) {
+ RecordWrite(clone_address, offset);
+ }
+ } else {
+ clone = new_space_.AllocateRaw(object_size);
+ if (clone->IsFailure()) return clone;
+ ASSERT(Heap::InNewSpace(clone));
+ // Since we know the clone is allocated in new space, we can copy
+    // the contents without worrying about updating the write barrier.
+ CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(clone)->address()),
+ reinterpret_cast<Object**>(source->address()),
+ object_size);
+ }
FixedArray* elements = FixedArray::cast(source->elements());
FixedArray* properties = FixedArray::cast(source->properties());
// Allocate string.
AllocationSpace space =
(size > MaxHeapObjectSize()) ? LO_SPACE : OLD_DATA_SPACE;
- Object* result = AllocateRaw(size, space);
+ Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
reinterpret_cast<HeapObject*>(result)->set_map(map);
// Use AllocateRaw rather than Allocate because the object's size cannot be
// determined from the map.
- Object* result = AllocateRaw(size, space);
+ Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
// Determine the map based on the string's length.
// Use AllocateRaw rather than Allocate because the object's size cannot be
// determined from the map.
- Object* result = AllocateRaw(size, space);
+ Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
// Determine the map based on the string's length.
Object* Heap::AllocateEmptyFixedArray() {
int size = FixedArray::SizeFor(0);
- Object* result = AllocateRaw(size, OLD_DATA_SPACE);
+ Object* result = AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
// Initialize the object.
reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
Object* Heap::AllocateRawFixedArray(int length) {
+ // Use the general function if we're forced to always allocate.
+ if (always_allocate()) return AllocateFixedArray(length, NOT_TENURED);
// Allocate the raw data for a fixed array.
int size = FixedArray::SizeFor(length);
return (size > MaxHeapObjectSize())
} else {
AllocationSpace space =
(pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
- result = AllocateRaw(size, space);
+ result = AllocateRaw(size, space, OLD_POINTER_SPACE);
}
if (result->IsFailure()) return result;
static MapSpace* map_space() { return map_space_; }
static LargeObjectSpace* lo_space() { return lo_space_; }
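+  // True while at least one AlwaysAllocateScope is active; in that
+  // state allocations ignore the old generation limit and failed
+  // new-space allocations are retried in old space.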
+ static bool always_allocate() { return always_allocate_scope_depth_ != 0; }
+ static Address always_allocate_scope_depth_address() {
+ return reinterpret_cast<Address>(&always_allocate_scope_depth_);
+ }
+
static Address* NewSpaceAllocationTopAddress() {
return new_space_.allocation_top_address();
}
// failed.
// Please note this function does not perform a garbage collection.
static inline Object* AllocateRaw(int size_in_bytes,
- AllocationSpace space);
+ AllocationSpace space,
+ AllocationSpace retry_space);
// Makes a new native code object
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// Finds out which space an object should get promoted to based on its type.
static inline OldSpace* TargetSpace(HeapObject* object);
+ static inline AllocationSpace TargetSpaceId(InstanceType type);
// Sets the stub_cache_ (only used when expanding the dictionary).
static void set_code_stubs(Dictionary* value) { code_stubs_ = value; }
static int new_space_growth_limit_;
static int scavenge_count_;
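+  // The number of live AlwaysAllocateScope objects.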
+ static int always_allocate_scope_depth_;
+
static const int kMaxMapSpaceSize = 8*MB;
static NewSpace new_space_;
friend class Factory;
friend class DisallowAllocationFailure;
+ friend class AlwaysAllocateScope;
+};
+
+
+class AlwaysAllocateScope {
+ public:
+ AlwaysAllocateScope() {
+ // We shouldn't hit any nested scopes, because that requires
+    // non-handle code to call handle code. The code still works, but
+ // performance will degrade, so we want to catch this situation
+ // in debug mode.
+ ASSERT(Heap::always_allocate_scope_depth_ == 0);
+ Heap::always_allocate_scope_depth_++;
+ }
+
+ ~AlwaysAllocateScope() {
+ Heap::always_allocate_scope_depth_--;
+ ASSERT(Heap::always_allocate_scope_depth_ == 0);
+ }
};
} else {
// Handle last resort GC and make sure to allow future allocations
// to grow the heap without causing GCs (if possible).
+ Counters::gc_last_resort_from_js.Increment();
Heap::CollectAllGarbage();
}
}
UNCLASSIFIED,
5,
"Heap::NewSpaceStart()");
- Add(ExternalReference::new_space_allocation_limit_address().address(),
+ Add(ExternalReference::heap_always_allocate_scope_depth().address(),
UNCLASSIFIED,
6,
+ "Heap::always_allocate_scope_depth()");
+ Add(ExternalReference::new_space_allocation_limit_address().address(),
+ UNCLASSIFIED,
+ 7,
"Heap::NewSpaceAllocationLimitAddress()");
Add(ExternalReference::new_space_allocation_top_address().address(),
UNCLASSIFIED,
- 7,
+ 8,
"Heap::NewSpaceAllocationTopAddress()");
Add(ExternalReference::debug_step_in_fp_address().address(),
UNCLASSIFIED,
- 8,
+ 9,
"Debug::step_in_fp_addr()");
}
} else if (IsLargeFixedArray(a)) {
o = Heap::lo_space()->AllocateRawFixedArray(size);
} else {
- o = Heap::AllocateRaw(size, space);
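+    // Pick the same retry space that Heap::AllocateRaw would use for a
+    // new-space allocation, so this simulation matches the real heap.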
+ AllocationSpace retry_space = (space == NEW_SPACE)
+ ? Heap::TargetSpaceId(type)
+ : space;
+ o = Heap::AllocateRaw(size, space, retry_space);
}
ASSERT(!o->IsFailure());
// Check that the simulation of heap allocation was correct.
// Free list allocation failed and there is no next page. Fail if we have
// hit the old generation size limit that should cause a garbage
// collection, unless we are inside an AlwaysAllocateScope.
- if (Heap::OldGenerationAllocationLimitReached()) {
+ if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
return NULL;
}
// Free list allocation failed and there is no next page. Fail if we have
// hit the old generation size limit that should cause a garbage
// collection, unless we are inside an AlwaysAllocateScope.
- if (Heap::OldGenerationAllocationLimitReached()) {
+ if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
return NULL;
}
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation, unless we are inside an AlwaysAllocateScope.
- if (Heap::OldGenerationAllocationLimitReached()) {
+ if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
return Failure::RetryAfterGC(requested_size, identity());
}
V8.GCCompactorCausedByOldspaceExhaustion) \
SC(gc_compactor_caused_by_weak_handles, \
V8.GCCompactorCausedByWeakHandles) \
+ SC(gc_last_resort_from_js, V8.GCLastResortFromJS) \
+ SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles) \
/* How is the generic keyed-load stub used? */ \
SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi) \
SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol) \
'test-ast.cc', 'test-heap.cc', 'test-utils.cc', 'test-compiler.cc',
'test-spaces.cc', 'test-mark-compact.cc', 'test-lock.cc',
'test-conversions.cc', 'test-strings.cc', 'test-serialize.cc',
- 'test-decls.cc'
+ 'test-decls.cc', 'test-alloc.cc'
],
'arch:arm': ['test-assembler-arm.cc', 'test-disasm-arm.cc'],
'arch:ia32': ['test-assembler-ia32.cc', 'test-disasm-ia32.cc'],
--- /dev/null
+// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+#include "top.h"
+
+#include "cctest.h"
+
+
+using namespace v8::internal;
+
+
+static Object* AllocateAfterFailures() {
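+  // Fail the first two calls so the caller has to go through both GC
+  // retry paths of CALL_AND_RETRY before the real allocations below run.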
+ static int attempts = 0;
+ if (++attempts < 3) return Failure::RetryAfterGC(0);
+
+ // New space.
+ NewSpace* new_space = Heap::new_space();
+ static const int kNewSpaceFillerSize = ByteArray::SizeFor(0);
+ while (new_space->Available() > kNewSpaceFillerSize) {
+ CHECK(!Heap::AllocateByteArray(0)->IsFailure());
+ }
+ CHECK(!Heap::AllocateByteArray(100)->IsFailure());
+ CHECK(!Heap::AllocateFixedArray(100, NOT_TENURED)->IsFailure());
+
+ // Make sure we can allocate through optimized allocation functions
+ // for specific kinds.
+ CHECK(!Heap::AllocateFixedArray(100)->IsFailure());
+ CHECK(!Heap::AllocateHeapNumber(0.42)->IsFailure());
+ CHECK(!Heap::AllocateArgumentsObject(Smi::FromInt(87), 10)->IsFailure());
+ Object* object = Heap::AllocateJSObject(*Top::object_function());
+ CHECK(!Heap::CopyJSObject(JSObject::cast(object))->IsFailure());
+
+ // Old data space.
+ OldSpace* old_data_space = Heap::old_data_space();
+ static const int kOldDataSpaceFillerSize = SeqAsciiString::SizeFor(0);
+ while (old_data_space->Available() > kOldDataSpaceFillerSize) {
+ CHECK(!Heap::AllocateRawAsciiString(0, TENURED)->IsFailure());
+ }
+ CHECK(!Heap::AllocateRawAsciiString(100, TENURED)->IsFailure());
+
+ // Large object space.
+ while (!Heap::OldGenerationAllocationLimitReached()) {
+ CHECK(!Heap::AllocateFixedArray(10000, TENURED)->IsFailure());
+ }
+ CHECK(!Heap::AllocateFixedArray(10000, TENURED)->IsFailure());
+
+ // Map space.
+ MapSpace* map_space = Heap::map_space();
+ static const int kMapSpaceFillerSize = Map::kSize;
+ InstanceType instance_type = JS_OBJECT_TYPE;
+ int instance_size = JSObject::kHeaderSize;
+ while (map_space->Available() > kMapSpaceFillerSize) {
+ CHECK(!Heap::AllocateMap(instance_type, instance_size)->IsFailure());
+ }
+ CHECK(!Heap::AllocateMap(instance_type, instance_size)->IsFailure());
+
+ // Test that we can allocate in old pointer space and code space.
+ CHECK(!Heap::AllocateFixedArray(100, TENURED)->IsFailure());
+ CHECK(!Heap::CopyCode(Builtins::builtin(Builtins::Illegal))->IsFailure());
+
+ // Return success.
+ return Smi::FromInt(42);
+}
+
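+// Run the allocation through CALL_HEAP_FUNCTION so that both GC retries
+// and the final AlwaysAllocateScope attempt in CALL_AND_RETRY are
+// exercised.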
+static Handle<Object> Test() {
+ CALL_HEAP_FUNCTION(AllocateAfterFailures(), Object);
+}
+
+
+TEST(Stress) {
+ v8::Persistent<v8::Context> env = v8::Context::New();
+ v8::HandleScope scope;
+ env->Enter();
+ Handle<Object> o = Test();
+ CHECK(o->IsSmi() && Smi::cast(*o)->value() == 42);
+ env->Exit();
+}
>
</File>
<File
+ RelativePath="..\..\test\cctest\test-alloc.cc"
+ >
+ </File>
+ <File
RelativePath="..\..\test\cctest\test-api.cc"
>
</File>