*/
class V8EXPORT AssertNoGCScope {
#ifndef DEBUG
- V8_INLINE(AssertNoGCScope(Isolate* isolate)) {}
+ // TODO(yangguo): remove isolate argument.
+ V8_INLINE(AssertNoGCScope(Isolate* isolate)) { }
#else
AssertNoGCScope(Isolate* isolate);
~AssertNoGCScope();
private:
- Isolate* isolate_;
- bool last_state_;
+ // Stored as an opaque void* so this public header need not see the internal
+ // i::DisallowHeapAllocation type; the constructor news one and the
+ // destructor deletes it (see the AssertNoGCScope definitions in api.cc).
+ void* disallow_heap_allocation_;
#endif
};
class FrameFunctionIterator {
public:
- FrameFunctionIterator(Isolate* isolate, const AssertNoAllocation& promise)
+ FrameFunctionIterator(Isolate* isolate, const DisallowHeapAllocation& promise)
: frame_iterator_(isolate),
functions_(2),
index_(0) {
MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_allocation;
JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object);
if (holder == NULL) return isolate->heap()->undefined_value();
if (holder->shared()->native()) return isolate->heap()->null_value();
Handle<JSFunction> function(holder, isolate);
- FrameFunctionIterator it(isolate, no_alloc);
+ FrameFunctionIterator it(isolate, no_allocation);
// Find the function from the frames.
if (!it.Find(*function)) {
#include "../include/v8-debug.h"
#include "../include/v8-profiler.h"
#include "../include/v8-testing.h"
+#include "assert-scope.h"
#include "bootstrapper.h"
#include "code-stubs.h"
#include "compiler.h"
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::V8::VisitHandlesWithClassId");
- i::AssertNoAllocation no_allocation;
+ i::DisallowHeapAllocation no_allocation;
VisitorAdapter visitor_adapter(visitor);
isolate->global_handles()->IterateAllRootsWithClassIds(&visitor_adapter);
ASSERT(isolate == i::Isolate::Current());
IsDeadCheck(isolate, "v8::V8::VisitHandlesForPartialDependence");
- i::AssertNoAllocation no_allocation;
+ i::DisallowHeapAllocation no_allocation;
VisitorAdapter visitor_adapter(visitor);
isolate->global_handles()->IterateAllRootsInNewSpaceWithClassIds(
#ifdef DEBUG
-v8::AssertNoGCScope::AssertNoGCScope(v8::Isolate* isolate)
- : isolate_(isolate),
- last_state_(i::EnterAllocationScope(
- reinterpret_cast<i::Isolate*>(isolate), false)) {
+v8::AssertNoGCScope::AssertNoGCScope(v8::Isolate* isolate) {
+ disallow_heap_allocation_ = new i::DisallowHeapAllocation();
}
v8::AssertNoGCScope::~AssertNoGCScope() {
- i::ExitAllocationScope(reinterpret_cast<i::Isolate*>(isolate_), last_state_);
+ delete static_cast<i::DisallowHeapAllocation*>(disallow_heap_allocation_);
}
#endif
while (!blocks_.is_empty()) {
Object** block_start = blocks_.last();
Object** block_limit = &block_start[kHandleBlockSize];
- // We should not need to check for NoHandleAllocation here. Assert
- // this.
+ // We should not need to check for SealHandleScope here. Assert this.
ASSERT(prev_limit == block_limit ||
!(block_start <= prev_limit && prev_limit <= block_limit));
if (prev_limit == block_limit) break;
internal::Object** block_start = blocks_.last();
internal::Object** block_limit = block_start + kHandleBlockSize;
#ifdef DEBUG
- // NoHandleAllocation may make the prev_limit to point inside the block.
+ // SealHandleScope may make the prev_limit to point inside the block.
if (block_start <= prev_limit && prev_limit <= block_limit) {
#ifdef ENABLE_EXTRA_CHECKS
internal::HandleScope::ZapRange(prev_limit, block_limit);
#ifdef DEBUG
Isolate* isolate = Isolate::Current();
#endif
- ALLOW_HANDLE_DEREF(isolate, "using and embedding raw address");
+ AllowDeferredHandleDereference using_raw_address;
rm_ = no_reg;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
JSFunction* function) {
Isolate* isolate = function->GetIsolate();
HandleScope scope(isolate);
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
ASSERT(function->IsOptimized());
ASSERT(function->FunctionsInFunctionListShareSameCode());
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- { ALLOW_HANDLE_DEREF(isolate(),
- "copying a ZoneList of handles into a FixedArray");
+ { AllowDeferredHandleDereference copy_handles;
for (int i = 0; i < deoptimization_literals_.length(); i++) {
literals->set(i, *deoptimization_literals_[i]);
}
void LCodeGen::DoConstantT(LConstantT* instr) {
Handle<Object> value = instr->value();
- ALLOW_HANDLE_DEREF(isolate(), "smi check");
+ AllowDeferredHandleDereference smi_check;
if (value->IsSmi()) {
__ mov(ToRegister(instr->result()), Operand(value));
} else {
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
Register reg = ToRegister(instr->value());
Handle<JSFunction> target = instr->hydrogen()->target();
- ALLOW_HANDLE_DEREF(isolate(), "smi check");
+ AllowDeferredHandleDereference smi_check;
if (isolate()->heap()->InNewSpace(*target)) {
Register reg = ToRegister(instr->value());
Handle<JSGlobalPropertyCell> cell =
Condition cond) {
ASSERT(RelocInfo::IsCodeTarget(rmode));
// 'code' is always generated ARM code, never THUMB code
- ALLOW_HANDLE_DEREF(isolate(), "embedding raw address");
+ AllowDeferredHandleDereference embedding_raw_address;
Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}
RelocInfo::Mode rmode,
TypeFeedbackId ast_id,
Condition cond) {
- ALLOW_HANDLE_DEREF(isolate(), "using raw address");
+ AllowDeferredHandleDereference using_raw_address;
return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
}
rmode = RelocInfo::CODE_TARGET_WITH_ID;
}
// 'code' is always generated ARM code, never THUMB code
- ALLOW_HANDLE_DEREF(isolate(), "embedding raw address");
+ AllowDeferredHandleDereference embedding_raw_address;
Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
}
void MacroAssembler::LoadHeapObject(Register result,
Handle<HeapObject> object) {
- ALLOW_HANDLE_DEREF(isolate(), "using raw address");
+ AllowDeferredHandleDereference using_raw_address;
if (isolate()->heap()->InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(object);
void LoadHeapObject(Register dst, Handle<HeapObject> object);
void LoadObject(Register result, Handle<Object> object) {
- ALLOW_HANDLE_DEREF(isolate(), "heap object check");
+ AllowDeferredHandleDereference heap_object_check;
if (object->IsHeapObject()) {
LoadHeapObject(result, Handle<HeapObject>::cast(object));
} else {
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ASSERT_SCOPE_H_
+#define V8_ASSERT_SCOPE_H_
+
+#include "allocation.h"
+#include "platform.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+// Categories of per-thread assertions managed by PerThreadAssertScope.  The
+// enumerators index PerThreadAssertData::assert_states_, so
+// LAST_PER_THREAD_ASSERT_TYPE must remain the final entry (it is the count).
+enum PerThreadAssertType {
+ HEAP_ALLOCATION_ASSERT,
+ HANDLE_ALLOCATION_ASSERT,
+ HANDLE_DEREFERENCE_ASSERT,
+ DEFERRED_HANDLE_DEREFERENCE_ASSERT,
+ LAST_PER_THREAD_ASSERT_TYPE
+};
+
+
+#ifdef DEBUG
+// Debug-only, per-thread record of which assert categories are currently
+// allowed.  One instance lives in thread-local storage per thread (see
+// PerThreadAssertScopeBase::AssertData); every category starts out allowed.
+class PerThreadAssertData {
+ public:
+ PerThreadAssertData() {
+ for (int i = 0; i < LAST_PER_THREAD_ASSERT_TYPE; i++) {
+ assert_states_[i] = true;
+ }
+ }
+
+ // Record whether |type| is currently allowed on this thread.
+ void set(PerThreadAssertType type, bool allow) {
+ assert_states_[type] = allow;
+ }
+
+ // Query whether |type| is currently allowed on this thread.
+ bool get(PerThreadAssertType type) const {
+ return assert_states_[type];
+ }
+
+ private:
+ // One allowed/disallowed flag per PerThreadAssertType.
+ bool assert_states_[LAST_PER_THREAD_ASSERT_TYPE];
+
+ DISALLOW_COPY_AND_ASSIGN(PerThreadAssertData);
+};
+#endif // DEBUG
+
+
+// Shared base for all PerThreadAssertScope instantiations; owns lazy access
+// to the thread-local PerThreadAssertData.
+class PerThreadAssertScopeBase {
+#ifdef DEBUG
+ protected:
+ // Returns this thread's assert data, creating it on first use.
+ // NOTE(review): the lazily allocated PerThreadAssertData is never freed —
+ // presumably an accepted debug-build leak; confirm.
+ static PerThreadAssertData* AssertData() {
+ PerThreadAssertData* data = reinterpret_cast<PerThreadAssertData*>(
+ Thread::GetThreadLocal(thread_local_key));
+ if (data == NULL) {
+ data = new PerThreadAssertData();
+ Thread::SetThreadLocal(thread_local_key, data);
+ }
+ return data;
+ }
+
+ // TLS key under which the per-thread data is stored; initialized by
+ // Isolate (hence the friend declaration).
+ static Thread::LocalStorageKey thread_local_key;
+ friend class Isolate;
+#endif // DEBUG
+};
+
+
+
+// RAII scope that, in debug builds, sets the per-thread flag for |type| to
+// |allow| on construction and restores the previous value on destruction.
+// In release builds it compiles to nothing.
+template <PerThreadAssertType type, bool allow>
+class PerThreadAssertScope : public PerThreadAssertScopeBase {
+ public:
+#ifndef DEBUG
+ PerThreadAssertScope() { }
+ static void SetIsAllowed(bool is_allowed) { }
+#else
+ PerThreadAssertScope() {
+ // Save the previous state so nested scopes unwind correctly.
+ PerThreadAssertData* data = AssertData();
+ old_state_ = data->get(type);
+ data->set(type, allow);
+ }
+
+ ~PerThreadAssertScope() { AssertData()->set(type, old_state_); }
+
+ // True if |type| is currently permitted on the calling thread.
+ static bool IsAllowed() { return AssertData()->get(type); }
+
+ private:
+ // State of |type| before this scope was entered.
+ bool old_state_;
+#endif
+};
+
+// Scope to document where we do not expect handles to be created.
+typedef PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, false>
+ DisallowHandleAllocation;
+
+// Scope to introduce an exception to DisallowHandleAllocation.
+typedef PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, true>
+ AllowHandleAllocation;
+
+// Scope to document where we do not expect any allocation and GC.
+typedef PerThreadAssertScope<HEAP_ALLOCATION_ASSERT, false>
+ DisallowHeapAllocation;
+
+// Scope to introduce an exception to DisallowHeapAllocation.
+typedef PerThreadAssertScope<HEAP_ALLOCATION_ASSERT, true>
+ AllowHeapAllocation;
+
+// Scope to document where we do not expect any handle dereferences.
+typedef PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, false>
+ DisallowHandleDereference;
+
+// Scope to introduce an exception to DisallowHandleDereference.
+typedef PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, true>
+ AllowHandleDereference;
+
+// Scope to document where we do not expect deferred handles to be dereferenced.
+typedef PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, false>
+ DisallowDeferredHandleDereference;
+
+// Scope to introduce an exception to DisallowDeferredHandleDereference.
+typedef PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, true>
+ AllowDeferredHandleDereference;
+
+} } // namespace v8::internal
+
+#endif // V8_ASSERT_SCOPE_H_
}
// Add the provided values.
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
for (int index = 0; index < to_add; index++) {
elms->set(index + len, args[index + 1], mode);
}
// Add the provided values.
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
int index;
for (index = 0; index < to_add; index++) {
Object* arg = args[index + 1];
// Shift the elements.
if (elms_obj->IsFixedArray()) {
FixedArray* elms = FixedArray::cast(elms_obj);
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
heap->MoveElements(elms, 0, 1, len - 1);
elms->set(len - 1, heap->the_hole_value());
} else {
elms = new_elms;
array->set_elements(elms);
} else {
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
heap->MoveElements(elms, to_add, 0, len);
}
// Add the provided values.
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
for (int i = 0; i < to_add; i++) {
elms->set(i, args[i + 1], mode);
result_len,
result_len);
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
if (result_len == 0) return maybe_array;
if (!maybe_array->To(&result_array)) return maybe_array;
if (!maybe_array->To(&result_array)) return maybe_array;
if (actual_delete_count > 0) {
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
ElementsAccessor* accessor = array->GetElementsAccessor();
MaybeObject* maybe_failure = accessor->CopyElements(
NULL, actual_start, elements_kind, result_array->elements(),
MoveDoubleElements(elms, delta, elms, 0, actual_start);
} else {
FixedArray* elms = FixedArray::cast(elms_obj);
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
heap->MoveElements(elms, delta, 0, actual_start);
}
FillWithHoles(elms, new_length, len);
} else {
FixedArray* elms = FixedArray::cast(elms_obj);
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
heap->MoveElements(elms, actual_start + item_count,
actual_start + actual_delete_count,
(len - actual_delete_count - actual_start));
MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
if (!maybe_obj->To(&new_elms)) return maybe_obj;
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
ElementsKind kind = array->GetElementsKind();
ElementsAccessor* accessor = array->GetElementsAccessor();
elms_obj = new_elms;
elms_changed = true;
} else {
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
heap->MoveElements(elms, actual_start + item_count,
actual_start + actual_delete_count,
(len - actual_delete_count - actual_start));
}
} else {
FixedArray* elms = FixedArray::cast(elms_obj);
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
for (int k = actual_start; k < actual_start + item_count; k++) {
elms->set(k, args[3 + k - actual_start], mode);
// Contains protection against recursive calls (faults while handling faults).
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
+ i::AllowHandleDereference allow_deref;
+ i::AllowDeferredHandleDereference allow_deferred_deref;
fflush(stdout);
fflush(stderr);
fatal_error_handler_nesting_depth++;
static LChunk* OptimizeGraph(HGraph* graph) {
- Isolate* isolate = graph->isolate();
- AssertNoAllocation no_gc;
- NoHandleAllocation no_handles(isolate);
- HandleDereferenceGuard no_deref(isolate, HandleDereferenceGuard::DISALLOW);
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
ASSERT(graph != NULL);
SmartArrayPointer<char> bailout_reason;
}
OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
- AssertNoAllocation no_gc;
- NoHandleAllocation no_handles(isolate());
- HandleDereferenceGuard no_deref(isolate(), HandleDereferenceGuard::DISALLOW);
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
ASSERT(last_status() == SUCCEEDED);
Timer t(this, &time_taken_to_optimize_);
// graph creation. To make sure that we don't encounter inconsistencies
// between graph creation and code generation, we disallow accessing
// objects through deferred handles during the latter, with exceptions.
-  HandleDereferenceGuard no_deref_deferred(
-      isolate(), HandleDereferenceGuard::DISALLOW_DEFERRED);
+  // No parentheses after the name: `T obj();` declares a function (the "most
+  // vexing parse"), so no scope object would be constructed and the deferred
+  // dereference guard would silently not be in effect.
+  DisallowDeferredHandleDereference no_deferred_handle_deref;
Handle<Code> optimized_code = chunk_->Codegen();
if (optimized_code.is_null()) {
info()->set_bailout_reason("code generation failed");
void BreakLocationIterator::Next() {
- AssertNoAllocation nogc;
+ DisallowHeapAllocation no_gc;
ASSERT(!RinfoDone());
// Iterate through reloc info for code and original code stopping at each
// Ensure no GC in this scope as we are going to use gc_metadata
// field in the Code object to mark active functions.
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
Object* active_code_marker = heap->the_hole_value();
while (!done) {
{ // Extra scope for iterator and no-allocation.
heap->EnsureHeapIsIterable();
- AssertNoAllocation no_alloc_during_heap_iteration;
+ DisallowHeapAllocation no_alloc_during_heap_iteration;
HeapIterator iterator(heap);
for (HeapObject* obj = iterator.next();
obj != NULL; obj = iterator.next()) {
// Scan heap for Script objects.
int count = 0;
HeapIterator iterator(heap);
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (obj->IsScript() && Script::cast(obj)->HasValidSource()) {
Context* context, OptimizedFunctionVisitor* visitor) {
Isolate* isolate = context->GetIsolate();
ZoneScope zone_scope(isolate->runtime_zone(), DELETE_ON_EXIT);
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
ASSERT(context->IsNativeContext());
void Deoptimizer::VisitAllOptimizedFunctions(
Isolate* isolate,
OptimizedFunctionVisitor* visitor) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
// Run through the list of all native contexts and deoptimize.
Object* context = isolate->heap()->native_contexts_list();
ZoneList<Code*>* partitions,
Zone* zone,
Object* undefined) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
Object* current = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
Object* remainder_head = undefined;
Object* remainder_tail = undefined;
void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
if (FLAG_trace_deopt) {
PrintF("[deoptimize all contexts]\n");
void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
DeoptimizeAllFilter filter;
if (object->IsJSGlobalProxy()) {
Object* proto = object->GetPrototype();
void Deoptimizer::DeoptimizeAllFunctionsWith(Isolate* isolate,
OptimizedFunctionFilter* filter) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
// Run through the list of all native contexts and deoptimize.
Object* context = isolate->heap()->native_contexts_list();
? StackFrame::STUB
: StackFrame::JAVA_SCRIPT;
trace_ = TraceEnabledFor(type, frame_type);
- ASSERT(HEAP->allow_allocation(false));
+#ifdef DEBUG
+ CHECK(AllowHeapAllocation::IsAllowed());
+ disallow_heap_allocation_ = new DisallowHeapAllocation();
+#endif // DEBUG
unsigned size = ComputeInputFrameSize();
input_ = new(size) FrameDescription(size, function);
input_->SetFrameType(frame_type);
Deoptimizer::~Deoptimizer() {
ASSERT(input_ == NULL && output_ == NULL);
+ ASSERT(disallow_heap_allocation_ == NULL);
}
delete[] output_;
input_ = NULL;
output_ = NULL;
- ASSERT(!HEAP->allow_allocation(true));
+#ifdef DEBUG
+ CHECK(!AllowHeapAllocation::IsAllowed());
+ CHECK(disallow_heap_allocation_ != NULL);
+ delete disallow_heap_allocation_;
+ disallow_heap_allocation_ = NULL;
+#endif // DEBUG
}
JavaScriptFrame* frame,
int inlined_jsframe_index,
int formal_parameter_count) {
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
int deopt_index = Safepoint::kNoDeoptimizationIndex;
DeoptimizationInputData* data =
static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
List<Object*> deferred_arguments_objects_values_;
List<ArgumentsObjectMaterializationDescriptor> deferred_arguments_objects_;
List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
+#ifdef DEBUG
+ DisallowHeapAllocation* disallow_heap_allocation_;
+#endif // DEBUG
bool trace_;
const V8NameConverter& converter,
byte* begin,
byte* end) {
- NoHandleAllocation ha(isolate);
- AssertNoAllocation no_alloc;
+ SealHandleScope shs(isolate);
+ DisallowHeapAllocation no_alloc;
ExternalReferenceEncoder ref_encoder;
Heap* heap = HEAP;
uint32_t to_start,
int raw_copy_size) {
ASSERT(to_base->map() != HEAP->fixed_cow_array_map());
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
uint32_t to_start,
int raw_copy_size) {
SeededNumberDictionary* from = SeededNumberDictionary::cast(from_base);
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
Heap* heap = from->GetHeap();
if (raw_copy_size < 0) {
// Fill in the content
{
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
for (int i = 0; i < len0; i++) {
Object* e = to->get(i);
}
case FAST_HOLEY_ELEMENTS:
case FAST_ELEMENTS: {
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
FixedArray* object_elms = FixedArray::cast(elms);
for (int index = 0; index < number_of_elements; index++) {
// Save and restore context around invocation and block the
// allocation of handles without explicit handle scopes.
SaveContext save(isolate);
- NoHandleAllocation na(isolate);
+ SealHandleScope shs(isolate);
JSEntryFunction stub_entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
// Call the function through the right JS entry stub.
Handle<JSGlobalPropertyCell> Factory::NewJSGlobalPropertyCell(
Handle<Object> value) {
- ALLOW_HANDLE_DEREF(isolate(),
- "converting a handle into a global property cell");
+ AllowDeferredHandleDereference convert_to_cell;
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateJSGlobalPropertyCell(*value),
bool print_line_number) {
// constructor calls
HandleScope scope(isolate);
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
JavaScriptFrameIterator it(isolate);
while (!it.done()) {
if (it.frame()->is_java_script()) {
#ifdef DEBUG
template <typename T>
-bool Handle<T>::IsDereferenceAllowed(bool allow_deferred) const {
+bool Handle<T>::IsDereferenceAllowed(bool explicitly_allow_deferred) const {
ASSERT(location_ != NULL);
Object* object = *BitCast<T**>(location_);
if (object->IsSmi()) return true;
handle < roots_array_start + Heap::kStrongRootListLength) {
return true;
}
- if (isolate->optimizing_compiler_thread()->IsOptimizerThread() &&
- !Heap::RelocationLock::IsLockedByOptimizerThread(isolate->heap())) {
- return false;
+ if (!AllowHandleDereference::IsAllowed()) return false;
+ if (!explicitly_allow_deferred &&
+ !AllowDeferredHandleDereference::IsAllowed()) {
+ // Accessing maps and internalized strings is safe.
+ if (heap_object->IsMap()) return true;
+ if (heap_object->IsInternalizedString()) return true;
+ return !isolate->IsDeferredHandle(handle);
}
- switch (isolate->HandleDereferenceGuardState()) {
- case HandleDereferenceGuard::ALLOW:
- return true;
- case HandleDereferenceGuard::DISALLOW:
- return false;
- case HandleDereferenceGuard::DISALLOW_DEFERRED:
- // Accessing maps and internalized strings is safe.
- if (heap_object->IsMap()) return true;
- if (heap_object->IsInternalizedString()) return true;
- return allow_deferred || !isolate->IsDeferredHandle(handle);
- }
- return false;
+ return true;
}
#endif
template <typename T>
T** HandleScope::CreateHandle(Isolate* isolate, T* value) {
+ ASSERT(AllowHandleAllocation::IsAllowed());
v8::ImplementationUtilities::HandleScopeData* current =
isolate->handle_scope_data();
#ifdef DEBUG
-inline NoHandleAllocation::NoHandleAllocation(Isolate* isolate)
- : isolate_(isolate) {
- active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
- if (active_) {
- // Shrink the current handle scope to make it impossible to do
- // handle allocations without an explicit handle scope.
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate_->handle_scope_data();
- limit_ = current->limit;
- current->limit = current->next;
- level_ = current->level;
- current->level = 0;
- }
-}
-
-
-inline NoHandleAllocation::~NoHandleAllocation() {
- if (active_) {
- // Restore state in current handle scope to re-enable handle
- // allocations.
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate_->handle_scope_data();
- ASSERT_EQ(0, current->level);
- current->level = level_;
- ASSERT_EQ(current->next, current->limit);
- current->limit = limit_;
- }
-}
-
+inline SealHandleScope::SealHandleScope(Isolate* isolate) : isolate_(isolate) {
+ // Make sure the current thread is allowed to create handles to begin with.
+ CHECK(AllowHandleAllocation::IsAllowed());
+ v8::ImplementationUtilities::HandleScopeData* current =
+ isolate_->handle_scope_data();
+ // Shrink the current handle scope to make it impossible to do
+ // handle allocations without an explicit handle scope.
+ // Save the old limit so the destructor can restore it; without this the
+ // declared member limit_ is never written and the enclosing HandleScope
+ // would stay shrunk after the seal ends.
+ limit_ = current->limit;
+ current->limit = current->next;
-HandleDereferenceGuard::HandleDereferenceGuard(Isolate* isolate, State state)
- : isolate_(isolate) {
- old_state_ = isolate_->HandleDereferenceGuardState();
- isolate_->SetHandleDereferenceGuardState(state);
+ level_ = current->level;
+ current->level = 0;
}
-HandleDereferenceGuard::~HandleDereferenceGuard() {
- isolate_->SetHandleDereferenceGuardState(old_state_);
+inline SealHandleScope::~SealHandleScope() {
+ // Restore state in current handle scope to re-enable handle
+ // allocations.
+ v8::ImplementationUtilities::HandleScopeData* data =
+ isolate_->handle_scope_data();
+ ASSERT_EQ(0, data->level);
+ data->level = level_;
+ // No handles may have been created while sealed, so next must still equal
+ // the shrunk limit; then re-expand to the pre-seal limit.
+ ASSERT_EQ(data->next, data->limit);
+ data->limit = limit_;
}
#endif
List<int> line_ends(line_count_estimate);
Isolate* isolate = src->GetIsolate();
{
- AssertNoAllocation no_heap_allocation; // ensure vectors stay valid.
+ DisallowHeapAllocation no_allocation; // ensure vectors stay valid.
// Dispatch on type of strings.
String::FlatContent content = src->GetFlatContent();
ASSERT(content.IsFlat());
// Convert code position into line number.
int GetScriptLineNumber(Handle<Script> script, int code_pos) {
InitScriptLineEnds(script);
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
const int line_ends_len = line_ends_array->length();
int line_number = GetScriptLineNumber(script, code_pos);
if (line_number == -1) return -1;
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
line_number = line_number - script->line_offset()->value();
if (line_number == 0) return code_pos + script->column_offset()->value();
}
int GetScriptLineNumberSafe(Handle<Script> script, int code_pos) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
if (!script->line_ends()->IsUndefined()) {
return GetScriptLineNumber(script, code_pos);
}
inline Handle<T> EscapeFrom(v8::HandleScope* scope);
#ifdef DEBUG
- bool IsDereferenceAllowed(bool allow_deferred) const;
+ bool IsDereferenceAllowed(bool explicitly_allow_deferred) const;
#endif // DEBUG
private:
Handle<Object> key,
Handle<Object> value);
-class NoHandleAllocation BASE_EMBEDDED {
+
+// Seal off the current HandleScope so that new handles can only be created
+// if a new HandleScope is entered.
+class SealHandleScope BASE_EMBEDDED {
public:
#ifndef DEBUG
- explicit NoHandleAllocation(Isolate* isolate) {}
- ~NoHandleAllocation() {}
+ explicit SealHandleScope(Isolate* isolate) {}
+ ~SealHandleScope() {}
#else
- explicit inline NoHandleAllocation(Isolate* isolate);
- inline ~NoHandleAllocation();
+ explicit inline SealHandleScope(Isolate* isolate);
+ inline ~SealHandleScope();
private:
Isolate* isolate_;
Object** limit_;
#endif
};
-
-class HandleDereferenceGuard BASE_EMBEDDED {
- public:
- enum State { ALLOW, DISALLOW, DISALLOW_DEFERRED };
-#ifndef DEBUG
- HandleDereferenceGuard(Isolate* isolate, State state) { }
- ~HandleDereferenceGuard() { }
-#else
- inline HandleDereferenceGuard(Isolate* isolate, State state);
- inline ~HandleDereferenceGuard();
- private:
- Isolate* isolate_;
- State old_state_;
-#endif
-};
-
-#ifdef DEBUG
-#define ALLOW_HANDLE_DEREF(isolate, why_this_is_safe) \
- HandleDereferenceGuard allow_deref(isolate, \
- HandleDereferenceGuard::ALLOW);
-#else
-#define ALLOW_HANDLE_DEREF(isolate, why_this_is_safe)
-#endif // DEBUG
-
} } // namespace v8::internal
#endif // V8_HANDLES_H_
MaybeObject* Heap::AllocateRaw(int size_in_bytes,
AllocationSpace space,
AllocationSpace retry_space) {
-  SLOW_ASSERT(!isolate_->optimizing_compiler_thread()->IsOptimizerThread());
-  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
+  // AllocateRaw performs a heap allocation, so the permission being replaced
+  // (allocation_allowed_) maps to the heap-allocation category, not the
+  // handle-allocation one.
+  ASSERT(AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
ASSERT(space != NEW_SPACE ||
retry_space == OLD_POINTER_SPACE ||
retry_space == OLD_DATA_SPACE ||
return __maybe_object__)
-#ifdef DEBUG
-
-inline bool Heap::allow_allocation(bool new_state) {
- bool old = allocation_allowed_;
- allocation_allowed_ = new_state;
- return old;
-}
-
-inline void Heap::set_allow_allocation(bool allocation_allowed) {
- allocation_allowed_ = allocation_allowed;
-}
-
-#endif
-
-
void ExternalStringTable::AddString(String* string) {
ASSERT(string->IsExternalString());
if (heap_->InNewSpace(string)) {
}
-#ifdef DEBUG
-bool EnterAllocationScope(Isolate* isolate, bool allow_allocation) {
- bool active = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
- bool last_state = isolate->heap()->IsAllocationAllowed();
- if (active) {
- // TODO(yangguo): Make HandleDereferenceGuard avoid isolate mutation in the
- // same way if running on the optimizer thread.
- isolate->heap()->set_allow_allocation(allow_allocation);
- }
- return last_state;
-}
-
-
-void ExitAllocationScope(Isolate* isolate, bool last_state) {
- bool active = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
- if (active) {
- isolate->heap()->set_allow_allocation(last_state);
- }
-}
-
-
-AssertNoAllocation::AssertNoAllocation()
- : last_state_(EnterAllocationScope(ISOLATE, false)) {
-}
-
-AssertNoAllocation::~AssertNoAllocation() {
- ExitAllocationScope(ISOLATE, last_state_);
-}
-
-DisableAssertNoAllocation::DisableAssertNoAllocation()
- : last_state_(EnterAllocationScope(ISOLATE, true)) {
-}
-
-DisableAssertNoAllocation::~DisableAssertNoAllocation() {
- ExitAllocationScope(ISOLATE, last_state_);
-}
-#else
-
-AssertNoAllocation::AssertNoAllocation() { }
-AssertNoAllocation::~AssertNoAllocation() { }
-DisableAssertNoAllocation::DisableAssertNoAllocation() { }
-DisableAssertNoAllocation::~DisableAssertNoAllocation() { }
-
-#endif
-
-
} } // namespace v8::internal
#endif // V8_HEAP_INL_H_
// First perform a full GC in order to avoid dead objects.
HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"HeapSnapshotsCollection::FindHeapObjectById");
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
HeapObject* object = NULL;
HeapIterator iterator(heap(), HeapIterator::kFilterUnreachable);
// Make sure that object with the given id is still reachable.
}
}
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
for (int i = 0, l = enumerator.count(); i < l; ++i) {
objects_tags_.SetTag(*enumerator.at(i), urls[i]);
}
// The following code uses heap iterators, so we want the heap to be
// stable. It should follow TagGlobalObjects as that can allocate.
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_alloc;
#ifdef VERIFY_HEAP
debug_heap->Verify();
remembered_unmapped_pages_index_(0),
unflattened_strings_length_(0),
#ifdef DEBUG
- allocation_allowed_(true),
allocation_timeout_(0),
disallow_allocation_failure_(false),
#endif // DEBUG
void Heap::GarbageCollectionPrologue() {
- isolate_->transcendental_cache()->Clear();
- ClearJSFunctionResultCaches();
- gc_count_++;
- unflattened_strings_length_ = 0;
+ { AllowHeapAllocation for_the_first_part_of_prologue;
+ isolate_->transcendental_cache()->Clear();
+ ClearJSFunctionResultCaches();
+ gc_count_++;
+ unflattened_strings_length_ = 0;
- if (FLAG_flush_code && FLAG_flush_code_incrementally) {
- mark_compact_collector()->EnableCodeFlushing(true);
- }
+ if (FLAG_flush_code && FLAG_flush_code_incrementally) {
+ mark_compact_collector()->EnableCodeFlushing(true);
+ }
#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- Verify();
- }
+ if (FLAG_verify_heap) {
+ Verify();
+ }
#endif
+ }
#ifdef DEBUG
- ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
- allow_allocation(false);
+ ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
if (FLAG_gc_verbose) Print();
}
#endif
+ AllowHeapAllocation for_the_rest_of_the_epilogue;
+
#ifdef DEBUG
- allow_allocation(true);
if (FLAG_print_global_handles) isolate_->global_handles()->Print();
if (FLAG_print_handles) PrintHandles();
if (FLAG_gc_verbose) Print();
bool next_gc_likely_to_collect_more = false;
{ GCTracer tracer(this, gc_reason, collector_reason);
+ ASSERT(AllowHeapAllocation::IsAllowed());
+ DisallowHeapAllocation no_allocation_during_gc;
GarbageCollectionPrologue();
// The GC count was incremented in the prologue. Tell the tracer about
// it.
ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
gc_post_processing_depth_++;
- { DisableAssertNoAllocation allow_allocation;
+ { AllowHeapAllocation allow_allocation;
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
next_gc_likely_to_collect_more =
isolate_->global_handles()->PostGarbageCollectionProcessing(
void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
// Both the external string table and the string table may contain
// external strings, but neither lists them exhaustively, nor is the
// This version of AllocateHeapNumber is optimized for
// allocation in new space.
STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
- ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
Object* result;
{ MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
if (!maybe_result->ToObject(&result)) return maybe_result;
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
ConsString* cons_string = ConsString::cast(result);
WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
cons_string->set_length(length);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
SlicedString* sliced_string = SlicedString::cast(result);
sliced_string->set_length(length);
sliced_string->set_hash_field(String::kEmptyHashField);
// This calls Copy directly rather than using Heap::AllocateRaw so we
// duplicate the check here.
- ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
+ ASSERT(AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
// Check that the size of the boilerplate matches our
// expectations. The ArgumentsAccessStub::GenerateNewObject relies
result->set_length(len);
// Copy the content
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
return result;
void Heap::EnsureHeapIsIterable() {
- ASSERT(IsAllocationAllowed());
+ ASSERT(AllowHeapAllocation::IsAllowed());
if (!IsHeapIterable()) {
CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
}
visitor.TransitiveClosure();
}
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_allocation_;
};
Object* object = list_[i];
JSFunction* getter_fun;
- { AssertNoAllocation assert;
+ { DisallowHeapAllocation no_gc;
// Skip possible holes in the list.
if (object->IsTheHole()) continue;
if (isolate->heap()->InNewSpace(object) || budget == 0) {
#include <cmath>
#include "allocation.h"
+#include "assert-scope.h"
#include "globals.h"
#include "incremental-marking.h"
#include "list.h"
inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
#ifdef DEBUG
- bool IsAllocationAllowed() { return allocation_allowed_; }
- inline void set_allow_allocation(bool allocation_allowed);
- inline bool allow_allocation(bool enable);
-
bool disallow_allocation_failure() {
return disallow_allocation_failure_;
}
#undef ROOT_ACCESSOR
#ifdef DEBUG
- bool allocation_allowed_;
-
// If the --gc-interval flag is set to a positive value, this
// variable holds the value indicating the number of allocations
// remain until the next failure and garbage collection.
};
-// A helper class to document/test C++ scopes where we do not
-// expect a GC. Usage:
-//
-// /* Allocation not allowed: we cannot handle a GC in this scope. */
-// { AssertNoAllocation nogc;
-// ...
-// }
-
-#ifdef DEBUG
-inline bool EnterAllocationScope(Isolate* isolate, bool allow_allocation);
-inline void ExitAllocationScope(Isolate* isolate, bool last_state);
-#endif
-
-
-class AssertNoAllocation {
- public:
- inline AssertNoAllocation();
- inline ~AssertNoAllocation();
-
-#ifdef DEBUG
- private:
- bool last_state_;
-#endif
-};
-
-
-class DisableAssertNoAllocation {
- public:
- inline DisableAssertNoAllocation();
- inline ~DisableAssertNoAllocation();
-
-#ifdef DEBUG
- private:
- bool last_state_;
-#endif
-};
-
// GCTracer collects and prints ONE line after each garbage collector
// invocation IFF --trace_gc is used.
what_to_find_(what_to_find),
visit_mode_(visit_mode),
object_stack_(20),
- no_alloc() {}
+ no_allocation() {}
virtual void VisitPointers(Object** start, Object** end);
VisitMode visit_mode_;
List<Object*> object_stack_;
- AssertNoAllocation no_alloc; // i.e. no gc allowed.
+ DisallowHeapAllocation no_allocation; // i.e. no gc allowed.
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
block_side_effects_(graph->blocks()->length(), graph->zone()),
loop_side_effects_(graph->blocks()->length(), graph->zone()),
visited_on_paths_(graph->zone(), graph->blocks()->length()) {
-#ifdef DEBUG
- ASSERT(info->isolate()->optimizing_compiler_thread()->IsOptimizerThread() ||
- !info->isolate()->heap()->IsAllocationAllowed());
-#endif
+ ASSERT(!AllowHandleAllocation::IsAllowed());
block_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length(),
graph_->zone());
loop_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length(),
// to be tenured so that it's guaranteed not be be located in new space.
handle_ = FACTORY->NewNumber(double_value_, TENURED);
}
- ALLOW_HANDLE_DEREF(Isolate::Current(), "smi check");
+ AllowDeferredHandleDereference smi_check;
ASSERT(has_int32_value_ || !handle_->IsSmi());
return handle_;
}
void HGraph::Verify(bool do_full_verify) const {
Heap::RelocationLock(isolate()->heap());
- ALLOW_HANDLE_DEREF(isolate(), "debug mode verification");
+ AllowDeferredHandleDereference allow_deferred_deref;
for (int i = 0; i < blocks_.length(); i++) {
HBasicBlock* block = blocks_.at(i);
void HGraph::FinalizeUniqueValueIds() {
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
ASSERT(!isolate()->optimizing_compiler_thread()->IsOptimizerThread());
for (int i = 0; i < blocks()->length(); ++i) {
for (HInstruction* instr = blocks()->at(i)->first();
if (FLAG_trace_dead_code_elimination) {
HeapStringAllocator allocator;
StringStream stream(&allocator);
- ALLOW_HANDLE_DEREF(isolate(), "debug mode printing");
+ AllowDeferredHandleDereference debug_output;
if (ref != NULL) {
ref->PrintTo(&stream);
} else {
void HTracer::TraceLithium(const char* name, LChunk* chunk) {
ASSERT(!FLAG_parallel_recompilation);
- ALLOW_HANDLE_DEREF(chunk->isolate(), "debug output");
+ AllowDeferredHandleDereference debug_output;
Trace(name, chunk->graph(), chunk);
}
void HTracer::TraceHydrogen(const char* name, HGraph* graph) {
ASSERT(!FLAG_parallel_recompilation);
- ALLOW_HANDLE_DEREF(graph->isolate(), "debug output");
+ AllowDeferredHandleDereference debug_output;
Trace(name, graph, NULL);
}
#ifdef DEBUG
Isolate* isolate = Isolate::Current();
#endif
- ALLOW_HANDLE_DEREF(isolate,
- "using and embedding raw address, heap object check");
+ AllowDeferredHandleDereference using_raw_address;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
ASSERT(!isolate->heap()->InNewSpace(obj));
void Assembler::emit(Handle<Object> handle) {
- ALLOW_HANDLE_DEREF(isolate(), "heap object check");
+ AllowDeferredHandleDereference heap_object_check;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
ASSERT(!isolate()->heap()->InNewSpace(obj));
void Assembler::emit(Handle<Code> code,
RelocInfo::Mode rmode,
TypeFeedbackId id) {
- ALLOW_HANDLE_DEREF(isolate(), "embedding raw address");
+ AllowDeferredHandleDereference embedding_raw_address;
emit(reinterpret_cast<intptr_t>(code.location()), rmode, id);
}
}
static Operand Cell(Handle<JSGlobalPropertyCell> cell) {
- ALLOW_HANDLE_DEREF(Isolate::Current(), "embedding raw address");
+ AllowDeferredHandleDereference embedding_raw_address;
return Operand(reinterpret_cast<int32_t>(cell.location()),
RelocInfo::GLOBAL_PROPERTY_CELL);
}
JSFunction* function) {
Isolate* isolate = function->GetIsolate();
HandleScope scope(isolate);
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation nha;
ASSERT(function->IsOptimized());
ASSERT(function->FunctionsInFunctionListShareSameCode());
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- { ALLOW_HANDLE_DEREF(isolate(),
- "copying a ZoneList of handles into a FixedArray");
+ { AllowDeferredHandleDereference copy_handles;
for (int i = 0; i < deoptimization_literals_.length(); i++) {
literals->set(i, *deoptimization_literals_[i]);
}
void LCodeGen::DoConstantT(LConstantT* instr) {
Register reg = ToRegister(instr->result());
Handle<Object> handle = instr->value();
- ALLOW_HANDLE_DEREF(isolate(), "smi check");
+ AllowDeferredHandleDereference smi_check;
if (handle->IsHeapObject()) {
__ LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
} else {
ASSERT(!operand->IsDoubleRegister());
if (operand->IsConstantOperand()) {
Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
- ALLOW_HANDLE_DEREF(isolate(), "smi check");
+ AllowDeferredHandleDereference smi_check;
if (object->IsSmi()) {
__ Push(Handle<Smi>::cast(object));
} else {
void MacroAssembler::LoadHeapObject(Register result,
Handle<HeapObject> object) {
- ALLOW_HANDLE_DEREF(isolate(), "embedding raw address");
+ AllowDeferredHandleDereference embedding_raw_address;
if (isolate()->heap()->InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(object);
void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
- ALLOW_HANDLE_DEREF(isolate(), "using raw address");
+ AllowDeferredHandleDereference using_raw_address;
if (isolate()->heap()->InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(object);
void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
- ALLOW_HANDLE_DEREF(isolate(), "using raw address");
+ AllowDeferredHandleDereference using_raw_address;
if (isolate()->heap()->InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(object);
void PushHeapObject(Handle<HeapObject> object);
void LoadObject(Register result, Handle<Object> object) {
- ALLOW_HANDLE_DEREF(isolate(), "heap object check");
+ AllowDeferredHandleDereference heap_object_check;
if (object->IsHeapObject()) {
LoadHeapObject(result, Handle<HeapObject>::cast(object));
} else {
}
void CmpObject(Register reg, Handle<Object> object) {
- ALLOW_HANDLE_DEREF(isolate(), "heap object check");
+ AllowDeferredHandleDereference heap_object_check;
if (object->IsHeapObject()) {
CmpHeapObject(reg, Handle<HeapObject>::cast(object));
} else {
int handler_to_overwrite = -1;
Handle<Map> new_receiver_map(receiver->map());
{
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
target()->FindAllMaps(&receiver_maps);
int number_of_maps = receiver_maps.length();
number_of_valid_maps = number_of_maps;
MapHandleList receiver_maps;
CodeHandleList handlers;
{
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
target()->FindAllMaps(&receiver_maps);
target()->FindAllCode(&handlers, receiver_maps.length());
}
bool IC::IsTransitionedMapOfMonomorphicTarget(Map* receiver_map) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
Map* current_map = target()->FindFirstMap();
ElementsKind receiver_elements_kind = receiver_map->elements_kind();
if (target()->is_load_stub()) {
bool is_same_handler = false;
{
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
Code* old_handler = target()->FindFirstCode();
is_same_handler = old_handler == *code;
}
break;
}
case POLYMORPHIC: {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(*stub, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
RUNTIME_FUNCTION(MaybeObject*, StoreIC_ArrayLength) {
- NoHandleAllocation nha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
JSArray* receiver = JSArray::cast(args[0]);
// it is necessary to extend the properties array of a
// JSObject.
RUNTIME_FUNCTION(MaybeObject*, SharedStoreIC_ExtendStorage) {
- NoHandleAllocation na(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
// Convert the parameters
RUNTIME_FUNCTION(MaybeObject*, StoreIC_Slow) {
- NoHandleAllocation na(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Slow) {
- NoHandleAllocation na(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
// Used from ICCompareStub::GenerateMiss in code-stubs-<arch>.cc.
RUNTIME_FUNCTION(Code*, CompareIC_Miss) {
- NoHandleAllocation na(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CompareIC ic(isolate, static_cast<Token::Value>(args.smi_at(2)));
ic.UpdateCaches(args.at<Object>(0), args.at<Object>(1));
int start_position) {
ASSERT(subject->IsFlat());
- AssertNoAllocation a;
+ DisallowHeapAllocation no_gc;
const byte* code_base = code_array->GetDataStartAddress();
uc16 previous_char = '\n';
String::FlatContent subject_content = subject->GetFlatContent();
Thread::LocalStorageKey Isolate::isolate_key_;
Thread::LocalStorageKey Isolate::thread_id_key_;
Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
+#ifdef DEBUG
+Thread::LocalStorageKey PerThreadAssertScopeBase::thread_local_key;
+#endif // DEBUG
Mutex* Isolate::process_wide_mutex_ = OS::CreateMutex();
Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL;
Atomic32 Isolate::isolate_counter_ = 0;
isolate_key_ = Thread::CreateThreadLocalKey();
thread_id_key_ = Thread::CreateThreadLocalKey();
per_isolate_thread_data_key_ = Thread::CreateThreadLocalKey();
+#ifdef DEBUG
+ PerThreadAssertScopeBase::thread_local_key = Thread::CreateThreadLocalKey();
+#endif // DEBUG
thread_data_table_ = new Isolate::ThreadDataTable();
default_isolate_ = new Isolate();
}
return;
}
// The MentionedObjectCache is not GC-proof at the moment.
- AssertNoAllocation nogc;
+ DisallowHeapAllocation no_gc;
ASSERT(StringStream::IsMentionedObjectCacheClear());
// Avoid printing anything if there are no frames.
ASSERT(receiver->IsAccessCheckNeeded());
// The callers of this method are not expecting a GC.
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
// Skip checks for hidden properties access. Note, we do not
// require existence of a context in this case.
memset(&js_spill_information_, 0, sizeof(js_spill_information_));
memset(code_kind_statistics_, 0,
sizeof(code_kind_statistics_[0]) * Code::NUMBER_OF_KINDS);
-
- compiler_thread_handle_deref_state_ = HandleDereferenceGuard::ALLOW;
- execution_thread_handle_deref_state_ = HandleDereferenceGuard::ALLOW;
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
}
-#ifdef DEBUG
-HandleDereferenceGuard::State Isolate::HandleDereferenceGuardState() {
- if (execution_thread_handle_deref_state_ == HandleDereferenceGuard::ALLOW &&
- compiler_thread_handle_deref_state_ == HandleDereferenceGuard::ALLOW) {
- // Short-cut to avoid polling thread id.
- return HandleDereferenceGuard::ALLOW;
- }
- if (FLAG_parallel_recompilation &&
- optimizing_compiler_thread()->IsOptimizerThread()) {
- return compiler_thread_handle_deref_state_;
- } else {
- return execution_thread_handle_deref_state_;
- }
-}
-
-
-void Isolate::SetHandleDereferenceGuardState(
- HandleDereferenceGuard::State state) {
- if (FLAG_parallel_recompilation &&
- optimizing_compiler_thread()->IsOptimizerThread()) {
- compiler_thread_handle_deref_state_ = state;
- } else {
- execution_thread_handle_deref_state_ = state;
- }
-}
-#endif
-
-
HStatistics* Isolate::GetHStatistics() {
if (hstatistics() == NULL) set_hstatistics(new HStatistics());
return hstatistics();
#include "../include/v8-debug.h"
#include "allocation.h"
#include "apiutils.h"
+#include "assert-scope.h"
#include "atomicops.h"
#include "builtins.h"
#include "contexts.h"
}
int* code_kind_statistics() { return code_kind_statistics_; }
-
- HandleDereferenceGuard::State HandleDereferenceGuardState();
-
- void SetHandleDereferenceGuardState(HandleDereferenceGuard::State state);
#endif
#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
HistogramInfo heap_histograms_[LAST_TYPE + 1];
JSObject::SpillInformation js_spill_information_;
int code_kind_statistics_[Code::NUMBER_OF_KINDS];
-
- HandleDereferenceGuard::State compiler_thread_handle_deref_state_;
- HandleDereferenceGuard::State execution_thread_handle_deref_state_;
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
bool ParseJsonString(Handle<String> expected) {
int length = expected->length();
if (source_->length() - position_ - 1 > length) {
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
String::FlatContent content = expected->GetFlatContent();
if (content.IsAscii()) {
ASSERT_EQ('"', c0_);
if (object->IsOneByteRepresentationUnderneath()) {
Handle<String> result =
isolate->factory()->NewRawOneByteString(worst_case_length);
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_gc;
return StringifyString_<SeqOneByteString>(
isolate,
object->GetFlatContent().ToOneByteVector(),
} else {
Handle<String> result =
isolate->factory()->NewRawTwoByteString(worst_case_length);
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_gc;
return StringifyString_<SeqTwoByteString>(
isolate,
object->GetFlatContent().ToUC16Vector(),
MaybeObject* BasicJsonStringifier::StringifyString_(Isolate* isolate,
Vector<Char> vector,
Handle<String> result) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_gc;
int final_size = 0;
ResultType* dest = ResultType::cast(*result);
dest->Set(final_size++, '\"');
// is a more pessimistic estimate, but faster to calculate.
if (((part_length_ - current_index_) >> 3) > length) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_gc;
Vector<const Char> vector = GetCharVector<Char>(string);
if (is_ascii) {
current_index_ += SerializeStringUnchecked_(
for (int i = 0; i < length; i++) {
// If GC moved the string, we need to refresh the vector.
if (*string != string_location) {
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
// This does not actually prevent the string from being relocated later.
vector = GetCharVector<Char>(string);
string_location = *string;
String* subject,
int from,
int to) {
- NoHandleAllocation no_handles(array->GetIsolate());
+ SealHandleScope shs(array->GetIsolate());
RegExpImpl::SetLastCaptureCount(array, 2);
RegExpImpl::SetLastSubject(array, subject);
RegExpImpl::SetLastInput(array, subject);
ASSERT(index <= subject->length());
if (!subject->IsFlat()) FlattenString(subject);
- AssertNoAllocation no_heap_allocation; // ensure vectors stay valid
+ DisallowHeapAllocation no_gc; // ensure vectors stay valid
String* needle = String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex));
int needle_len = needle->length();
if (res == RegExpImpl::RE_FAILURE) return isolate->factory()->null_value();
ASSERT_EQ(res, RegExpImpl::RE_SUCCESS);
- NoHandleAllocation no_handles(isolate);
+ SealHandleScope shs(isolate);
FixedArray* array = FixedArray::cast(last_match_info->elements());
SetAtomLastCapture(array, *subject, output_registers[0], output_registers[1]);
return last_match_info;
ASSERT(last_match_info->HasFastObjectElements());
int capture_register_count = (capture_count + 1) * 2;
last_match_info->EnsureSize(capture_register_count + kLastMatchOverhead);
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_allocation;
FixedArray* array = FixedArray::cast(last_match_info->elements());
if (match != NULL) {
for (int i = 0; i < capture_register_count; i += 2) {
LChunk* LChunk::NewChunk(HGraph* graph) {
- NoHandleAllocation no_handles(graph->isolate());
- AssertNoAllocation no_gc;
-
+ DisallowHandleAllocation no_handles;
+ DisallowHeapAllocation no_gc;
int values = graph->GetMaximumValueID();
CompilationInfo* info = graph->info();
if (values > LUnallocated::kMaxVirtualRegisters) {
ASSERT(!heap->InNewSpace(*substitution));
- AssertNoAllocation no_allocations_please;
+ DisallowHeapAllocation no_allocation;
ReplacingVisitor visitor(*original, *substitution);
template<typename Visitor>
static void IterateJSFunctions(SharedFunctionInfo* shared_info,
Visitor* visitor) {
- AssertNoAllocation no_allocations_please;
+ DisallowHeapAllocation no_allocation;
HeapIterator iterator(shared_info->GetHeap());
for (HeapObject* obj = iterator.next(); obj != NULL;
// Returns true if an instance of candidate were inlined into function's code.
static bool IsInlined(JSFunction* function, SharedFunctionInfo* candidate) {
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
if (function->code()->kind() != Code::OPTIMIZED_FUNCTION) return false;
static void DeoptimizeDependentFunctions(SharedFunctionInfo* function_info) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
DependentFunctionFilter filter(function_info);
Deoptimizer::DeoptimizeAllFunctionsWith(function_info->GetIsolate(), &filter);
code->instruction_start());
{
- AssertNoAllocation no_allocations_please;
+ DisallowHeapAllocation no_allocation;
for (RelocIterator it(*code); !it.done(); it.next()) {
RelocInfo* rinfo = it.rinfo();
if (RelocInfo::IsPosition(rinfo->rmode())) {
void LogMessageBuilder::Append(String* str) {
- AssertNoAllocation no_heap_allocation; // Ensure string stay valid.
+ DisallowHeapAllocation no_gc; // Ensure string stays valid.
int length = str->length();
for (int i = 0; i < length; i++) {
Append(static_cast<char>(str->Get(i)));
void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
if (str == NULL) return;
- AssertNoAllocation no_heap_allocation; // Ensure string stay valid.
+ DisallowHeapAllocation no_gc; // Ensure string stays valid.
int len = str->length();
if (len > 0x1000)
len = 0x1000;
Handle<SharedFunctionInfo>* sfis,
Handle<Code>* code_objects) {
HeapIterator iterator(heap);
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_gc;
int compiled_funcs_count = 0;
// Iterate the heap to find shared function info objects and record
heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"Logger::LogCodeObjects");
HeapIterator iterator(heap);
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_gc;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (obj->IsCode()) LogCodeObject(obj);
}
heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"Logger::LogAccessorCallbacks");
HeapIterator iterator(heap);
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_gc;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (!obj->IsExecutableAccessorInfo()) continue;
ExecutableAccessorInfo* ai = ExecutableAccessorInfo::cast(obj);
void MarkCompactCollector::ClearAndDeoptimizeDependentCode(Map* map) {
- AssertNoAllocation no_allocation_scope;
+ DisallowHeapAllocation no_allocation;
DependentCode* entries = map->dependent_code();
DependentCode::GroupStartIndexes starts(entries);
int number_of_entries = starts.number_of_entries();
void MarkCompactCollector::ClearNonLiveDependentCode(Map* map) {
- AssertNoAllocation no_allocation_scope;
+ DisallowHeapAllocation no_allocation;
DependentCode* entries = map->dependent_code();
DependentCode::GroupStartIndexes starts(entries);
int number_of_entries = starts.number_of_entries();
void MarkingThread::Run() {
Isolate::SetIsolateThreadLocals(isolate_, NULL);
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
while (true) {
start_marking_semaphore_->Wait();
#ifdef DEBUG
Isolate* isolate = Isolate::Current();
#endif
- ALLOW_HANDLE_DEREF(isolate, "using and embedding raw address");
+ AllowDeferredHandleDereference using_raw_address;
rm_ = no_reg;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
JSFunction* function) {
Isolate* isolate = function->GetIsolate();
HandleScope scope(isolate);
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation nha;
ASSERT(function->IsOptimized());
ASSERT(function->FunctionsInFunctionListShareSameCode());
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- { ALLOW_HANDLE_DEREF(isolate(),
- "copying a ZoneList of handles into a FixedArray");
+ { AllowDeferredHandleDereference copy_handles;
for (int i = 0; i < deoptimization_literals_.length(); i++) {
literals->set(i, *deoptimization_literals_[i]);
}
void LCodeGen::DoConstantT(LConstantT* instr) {
Handle<Object> value = instr->value();
- ALLOW_HANDLE_DEREF(isolate(), "smi check");
+ AllowDeferredHandleDereference smi_check;
if (value->IsSmi()) {
__ li(ToRegister(instr->result()), Operand(value));
} else {
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
Register reg = ToRegister(instr->value());
Handle<JSFunction> target = instr->hydrogen()->target();
- ALLOW_HANDLE_DEREF(isolate(), "smi check");
+ AllowDeferredHandleDereference smi_check;
if (isolate()->heap()->InNewSpace(*target)) {
Register reg = ToRegister(instr->value());
Handle<JSGlobalPropertyCell> cell =
void MacroAssembler::LoadHeapObject(Register result,
Handle<HeapObject> object) {
- ALLOW_HANDLE_DEREF(isolate(), "using raw address");
+ AllowDeferredHandleDereference using_raw_address;
if (isolate()->heap()->InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(object);
const Operand& rt,
BranchDelaySlot bd) {
ASSERT(RelocInfo::IsCodeTarget(rmode));
- ALLOW_HANDLE_DEREF(isolate(), "embedding raw address");
+ AllowDeferredHandleDereference embedding_raw_address;
Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
}
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
- ALLOW_HANDLE_DEREF(isolate(), "using raw address");
+ AllowDeferredHandleDereference using_raw_address;
return CallSize(reinterpret_cast<Address>(code.location()),
rmode, cond, rs, rt, bd);
}
SetRecordedAstId(ast_id);
rmode = RelocInfo::CODE_TARGET_WITH_ID;
}
- ALLOW_HANDLE_DEREF(isolate(), "embedding raw address");
+ AllowDeferredHandleDereference embedding_raw_address;
Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
void LoadHeapObject(Register dst, Handle<HeapObject> object);
void LoadObject(Register result, Handle<Object> object) {
- ALLOW_HANDLE_DEREF(isolate(), "heap object check");
+ AllowDeferredHandleDereference heap_object_check;
if (object->IsHeapObject()) {
LoadHeapObject(result, Handle<HeapObject>::cast(object));
} else {
// GetElement can trigger a getter which can cause allocation.
// This was not always the case. This ASSERT is here to catch
// leftover incorrect uses.
- ASSERT(HEAP->IsAllocationAllowed());
+ ASSERT(AllowHeapAllocation::IsAllowed());
return GetElementWithReceiver(this, index);
}
Handle<String> JSObject::ExpectedTransitionKey(Handle<Map> map) {
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
if (!map->HasTransitionArray()) return Handle<String>::null();
TransitionArray* transitions = map->transitions();
if (!transitions->IsSimpleTransition()) return Handle<String>::null();
Handle<Map> JSObject::FindTransitionToField(Handle<Map> map, Handle<Name> key) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
if (!map->HasTransitionArray()) return Handle<Map>::null();
TransitionArray* transitions = map->transitions();
int transition = transitions->Search(*key);
}
-WriteBarrierMode HeapObject::GetWriteBarrierMode(const AssertNoAllocation&) {
+WriteBarrierMode HeapObject::GetWriteBarrierMode(
+ const DisallowHeapAllocation& promise) {
Heap* heap = GetHeap();
if (heap->incremental_marking()->IsMarking()) return UPDATE_WRITE_BARRIER;
if (heap->InNewSpace(this)) return SKIP_WRITE_BARRIER;
details.IsDeleted() ||
details.dictionary_index() > 0);
int index = HashTable<Shape, Key>::EntryToIndex(entry);
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
WriteBarrierMode mode = FixedArray::GetWriteBarrierMode(no_gc);
FixedArray::set(index, key, mode);
FixedArray::set(index+1, value, mode);
// allowed. This is to avoid an assertion failure when allocating.
// Flattening strings is the only case where we always allow
// allocation because no GC is performed if the allocation fails.
- if (!HEAP->IsAllocationAllowed()) return this;
+ if (!AllowHeapAllocation::IsAllowed()) return this;
#endif
Heap* heap = GetHeap();
Map* Map::CurrentMapForDeprecated() {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
if (!is_deprecated()) return this;
DescriptorArray* old_descriptors = instance_descriptors();
bool JSObject::ReferencesObject(Object* obj) {
Map* map_of_this = map();
Heap* heap = GetHeap();
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_allocation;
// Is the object the constructor for this object?
if (map_of_this->constructor() == obj) {
}
FixedArray* result = FixedArray::cast(obj);
// Copy the content
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
int len = length();
if (new_length < len) len = new_length;
// We are taking the map from the old fixed array so the map is sure to
void FixedArray::CopyTo(int pos, FixedArray* dest, int dest_pos, int len) {
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
WriteBarrierMode mode = dest->GetWriteBarrierMode(no_gc);
for (int index = 0; index < len; index++) {
dest->set(dest_pos+index, get(pos+index), mode);
String::FlatContent String::GetFlatContent() {
- ASSERT(!GetHeap()->allow_allocation(false));
+ ASSERT(!AllowHeapAllocation::IsAllowed());
int length = this->length();
StringShape shape(this);
String* string = this;
if (str_ == NULL) return;
Handle<String> str(str_);
ASSERT(str->IsFlat());
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
// This does not actually prevent the vector from being relocated later.
String::FlatContent content = str->GetFlatContent();
ASSERT(content.IsFlat());
bool String::IsOneByteEqualTo(Vector<const uint8_t> str) {
int slen = length();
if (str.length() != slen) return false;
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
FlatContent content = GetFlatContent();
if (content.IsAscii()) {
return CompareChars(content.ToOneByteVector().start(),
bool String::IsTwoByteEqualTo(Vector<const uc16> str) {
int slen = length();
if (str.length() != slen) return false;
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
FlatContent content = GetFlatContent();
if (content.IsTwoByte()) {
return CompareChars(content.ToUC16Vector().start(), str.start(), slen) == 0;
void SharedFunctionInfo::EnableDeoptimizationSupport(Code* recompiled) {
ASSERT(!has_deoptimization_support());
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
Code* code = this->code();
if (IsCodeEquivalent(code, recompiled)) {
// Copy the deoptimization data from the recompiled code.
RelocInfo::kApplyMask;
// Needed to find target_object and runtime_entry on X64
Assembler* origin = desc.origin;
- ALLOW_HANDLE_DEREF(GetIsolate(), "embedding raw addresses into code");
+ AllowDeferredHandleDereference embedding_raw_address;
for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
Map* Code::FindFirstMap() {
ASSERT(is_inline_cache_stub());
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(this, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
void Code::ReplaceFirstMap(Map* replace_with) {
ASSERT(is_inline_cache_stub());
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(this, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
void Code::FindAllMaps(MapHandleList* maps) {
ASSERT(is_inline_cache_stub());
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(this, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
Code* Code::FindFirstCode() {
ASSERT(is_inline_cache_stub());
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
for (RelocIterator it(this, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
void Code::FindAllCode(CodeHandleList* code_list, int length) {
ASSERT(is_inline_cache_stub());
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
int i = 0;
for (RelocIterator it(this, mask); !it.done(); it.next()) {
Name* Code::FindFirstName() {
ASSERT(is_inline_cache_stub());
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
for (RelocIterator it(this, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
void DependentCode::DeoptimizeDependentCodeGroup(
Isolate* isolate,
DependentCode::DependencyGroup group) {
- AssertNoAllocation no_allocation_scope;
+ DisallowHeapAllocation no_allocation_scope;
DependentCode::GroupStartIndexes starts(this);
int start = starts.at(group);
int end = starts.at(group + 1);
void Dictionary<Shape, Key>::CopyValuesTo(FixedArray* elements) {
int pos = 0;
int capacity = HashTable<Shape, Key>::Capacity();
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
for (int i = 0; i < capacity; i++) {
Object* k = Dictionary<Shape, Key>::KeyAt(i);
MaybeObject* HashTable<Shape, Key>::Rehash(HashTable* new_table, Key key) {
ASSERT(NumberOfElements() < new_table->Capacity());
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
WriteBarrierMode mode = new_table->GetWriteBarrierMode(no_gc);
// Copy prefix to new array.
}
SeededNumberDictionary* new_dict = SeededNumberDictionary::cast(obj);
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_alloc;
uint32_t pos = 0;
uint32_t undefs = 0;
}
} else {
FixedArray* elements = FixedArray::cast(elements_base);
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_gc;
// Split elements into defined, undefined and the_hole, in that order. Only
// count locations for undefined and the hole, and fill them afterwards.
- WriteBarrierMode write_barrier = elements->GetWriteBarrierMode(no_alloc);
+ WriteBarrierMode write_barrier = elements->GetWriteBarrierMode(no_gc);
unsigned int undefs = limit;
unsigned int holes = limit;
// Assume most arrays contain no holes and undefined values, so minimize the
value->set_serialized_data(*serialized_descriptor);
// Copy in the data.
{
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
uint8_t* array = serialized_descriptor->GetDataStartAddress();
if (previous_length != 0) {
uint8_t* previous_array =
#define V8_OBJECTS_H_
#include "allocation.h"
+#include "assert-scope.h"
#include "builtins.h"
#include "elements-kind.h"
#include "list.h"
// object as a sign that they are not going to use this function
// from code that allocates and thus invalidates the returned write
// barrier mode.
- inline WriteBarrierMode GetWriteBarrierMode(const AssertNoAllocation&);
+ inline WriteBarrierMode GetWriteBarrierMode(
+ const DisallowHeapAllocation& promise);
// Dispatched behavior.
void HeapObjectShortPrint(StringStream* accumulator);
thread_id_ = ThreadId::Current().ToInteger();
#endif
Isolate::SetIsolateThreadLocals(isolate_, NULL);
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
int64_t epoch = 0;
if (FLAG_trace_parallel_recompilation) epoch = OS::Ticks();
// Mark it for installing before queuing so that we can be sure of the write
// order: marking first and (after being queued) installing code second.
{ Heap::RelocationLock relocation_lock(isolate_->heap());
+ AllowHandleDereference ahd;
optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode();
}
output_queue_.Enqueue(optimizing_compiler);
RUNTIME_FUNCTION(MaybeObject*, Runtime_SymbolName) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(Symbol, symbol, 0);
return symbol->name();
RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSProxy) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSReceiver, handler, 0);
Object* prototype = args[1];
RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSFunctionProxy) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_CHECKED(JSReceiver, handler, 0);
Object* call_trap = args[1];
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSProxy) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* obj = args[0];
return isolate->heap()->ToBoolean(obj->IsJSProxy());
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSFunctionProxy) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* obj = args[0];
return isolate->heap()->ToBoolean(obj->IsJSFunctionProxy());
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHandler) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
return proxy->handler();
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetCallTrap) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunctionProxy, proxy, 0);
return proxy->call_trap();
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructTrap) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunctionProxy, proxy, 0);
return proxy->construct_trap();
RUNTIME_FUNCTION(MaybeObject*, Runtime_Fix) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
proxy->Fix();
RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferGetByteLength) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSArrayBuffer, holder, 0);
return holder->byte_length();
RUNTIME_FUNCTION(MaybeObject*, Runtime_ClassOf) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* obj = args[0];
if (!obj->IsJSObject()) return isolate->heap()->null_value();
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPrototype) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(Object, obj, 0);
// We don't expect access checks to be needed on JSProxy objects.
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetPrototype) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSObject, obj, 0);
CONVERT_ARG_CHECKED(Object, prototype, 1);
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsInPrototypeChain) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
// See ECMA-262, section 15.3.5.3, page 88 (steps 5 - 8).
Object* O = args[0];
RUNTIME_FUNCTION(MaybeObject*, Runtime_PreventExtensions) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSObject, obj, 0);
return obj->PreventExtensions();
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsExtensible) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSObject, obj, 0);
if (obj->IsJSGlobalProxy()) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsTemplate) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* arg = args[0];
bool result = arg->IsObjectTemplateInfo() || arg->IsFunctionTemplateInfo();
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetTemplateField) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(HeapObject, templ, 0);
CONVERT_SMI_ARG_CHECKED(index, 1)
RUNTIME_FUNCTION(MaybeObject*, Runtime_DisableAccessChecks) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(HeapObject, object, 0);
Map* old_map = object->map();
RUNTIME_FUNCTION(MaybeObject*, Runtime_EnableAccessChecks) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(HeapObject, object, 0);
Map* old_map = object->map();
RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
- NoHandleAllocation nha(isolate);
+ SealHandleScope shs(isolate);
// args[0] == name
// args[1] == language_mode
// args[2] == value (optional)
RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
// All constants are declared with an initial value. The name
// of the constant is the first argument and the initial value
// is the second.
RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpConstructResult) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_SMI_ARG_CHECKED(elements_count, 0);
if (elements_count < 0 ||
if (!maybe_new_object->ToObject(&new_object)) return maybe_new_object;
}
{
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
HandleScope scope(isolate);
reinterpret_cast<HeapObject*>(new_object)->
set_map(isolate->native_context()->regexp_result_map());
RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) {
- NoHandleAllocation ha(isolate);
- AssertNoAllocation no_alloc;
+ SealHandleScope shs(isolate);
+ DisallowHeapAllocation no_allocation;
ASSERT(args.length() == 5);
CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
CONVERT_ARG_CHECKED(String, source, 1);
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsClassicModeFunction) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSReceiver, callable, 0);
if (!callable->IsJSFunction()) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultReceiver) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSReceiver, callable, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetName) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetName) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionNameShouldPrintAsAnonymous) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
return isolate->heap()->ToBoolean(
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionMarkNameShouldPrintAsAnonymous) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
f->shared()->set_name_should_print_as_anonymous(true);
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsGenerator) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
return isolate->heap()->ToBoolean(f->shared()->is_generator());
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionRemovePrototype) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetScriptSourcePosition) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetPositionForOffset) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(Code, code, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetInstanceClassName) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetLength) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetPrototype) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetReadOnlyPrototype) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
RUNTIME_ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsAPIFunction) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsBuiltin) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSGeneratorObject) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
JavaScriptFrameIterator it(isolate);
RUNTIME_FUNCTION(MaybeObject*, Runtime_SuspendJSGeneratorObject) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSGeneratorObject, generator_object, 0);
// EmitGeneratorResumeResume is called in any case, as it needs to reconstruct
// the stack frame and make space for arguments and operands.
RUNTIME_FUNCTION(MaybeObject*, Runtime_ResumeJSGeneratorObject) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSGeneratorObject, generator_object, 0);
CONVERT_ARG_CHECKED(Object, value, 1);
RUNTIME_FUNCTION(MaybeObject*, Runtime_ObjectFreeze) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSObject, object, 0);
return object->Freeze(isolate);
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCharCodeAt) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, subject, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_CharFromCode) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
return CharFromCode(isolate, args[0]);
}
Handle<String> joined_string;
if (is_ascii_) {
Handle<SeqOneByteString> seq = NewRawOneByteString(character_count_);
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_gc;
uint8_t* char_buffer = seq->GetChars();
StringBuilderConcatHelper(*subject_,
char_buffer,
} else {
// Non-ASCII.
Handle<SeqTwoByteString> seq = NewRawTwoByteString(character_count_);
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_gc;
uc16* char_buffer = seq->GetChars();
StringBuilderConcatHelper(*subject_,
char_buffer,
int capture_count,
int subject_length) {
{
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_gc;
String::FlatContent content = replacement->GetFlatContent();
ASSERT(content.IsFlat());
bool simple = false;
unsigned int limit,
Zone* zone) {
{
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
String::FlatContent subject_content = subject->GetFlatContent();
String::FlatContent pattern_content = pattern->GetFlatContent();
ASSERT(subject_content.IsFlat());
if (!sub->IsFlat()) FlattenString(sub);
if (!pat->IsFlat()) FlattenString(pat);
- AssertNoAllocation no_heap_allocation; // ensure vectors stay valid
+ DisallowHeapAllocation no_gc; // ensure vectors stay valid
// Extract flattened substrings of cons strings before determining asciiness.
String::FlatContent seq_sub = sub->GetFlatContent();
String::FlatContent seq_pat = pat->GetFlatContent();
if (!pat->IsFlat()) FlattenString(pat);
int position = -1;
- AssertNoAllocation no_heap_allocation; // ensure vectors stay valid
+ DisallowHeapAllocation no_gc; // ensure vectors stay valid
String::FlatContent sub_content = sub->GetFlatContent();
String::FlatContent pat_content = pat->GetFlatContent();
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLocaleCompare) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, str1, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_SubString) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(String, value, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToRadixString) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(radix, 1);
RUNTIME_ASSERT(2 <= radix && radix <= 36);
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToFixed) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(value, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToExponential) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(value, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToPrecision) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(value, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetProperty) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
Handle<Object> object = args.at<Object>(0);
// KeyedGetProperty is called from KeyedLoadIC::GenerateGeneric.
RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
// Fast cases for getting named properties of the receiver JSObject
// Return property without being observable by accessors or interceptors.
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDataProperty) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
RUNTIME_ASSERT(args.length() == 4 || args.length() == 5);
Handle<Object> object = args.at<Object>(0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsSmiToDouble) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
RUNTIME_ASSERT(args.length() == 1);
Handle<Object> object = args.at<Object>(0);
if (object->IsJSObject()) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsDoubleToObject) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
RUNTIME_ASSERT(args.length() == 1);
Handle<Object> object = args.at<Object>(0);
if (object->IsJSObject()) {
// This is used to decide if we should transform null and undefined
// into the global object when doing call and apply.
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetNativeFlag) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
RUNTIME_ASSERT(args.length() == 1);
Handle<Object> object = args.at<Object>(0);
// Check whether debugger and is about to step into the callback that is passed
// to a built-in function such as Array.forEach.
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugCallbackSupportsStepping) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
#ifdef ENABLE_DEBUGGER_SUPPORT
if (!isolate->IsDebuggerActive() || !isolate->debug()->StepInActive()) {
return isolate->heap()->false_value();
// Set one shot breakpoints for the callback function that is passed to a
// built-in function such as Array.forEach to enable stepping into the callback.
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrepareStepInIfStepping) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
#ifdef ENABLE_DEBUGGER_SUPPORT
Debug* debug = isolate->debug();
if (!debug->IsStepping()) return isolate->heap()->undefined_value();
// Set a local property, even if it is READ_ONLY. If the property does not
// exist, it will be added with attributes NONE.
RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
RUNTIME_ASSERT(args.length() == 3 || args.length() == 4);
CONVERT_ARG_CHECKED(JSObject, object, 0);
CONVERT_ARG_CHECKED(Name, name, 1);
RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteProperty) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSReceiver, object, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(Name, key, 1);
RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) {
- NoHandleAllocation na(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
CONVERT_ARG_CHECKED(Name, key, 1);
RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) {
- NoHandleAllocation na(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
CONVERT_SMI_ARG_CHECKED(index, 1);
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsPropertyEnumerable) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSObject, object, 0);
// have none, the map of the object. This is used to speed up
// the check for deletions during a for-in.
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNamesFast) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSReceiver, raw_object, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
// Compute the frame holding the arguments.
RUNTIME_FUNCTION(MaybeObject*, Runtime_ToFastProperties) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* object = args[0];
return (object->IsJSObject() && !object->IsGlobalObject())
RUNTIME_FUNCTION(MaybeObject*, Runtime_ToBool) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
return isolate->heap()->ToBoolean(args[0]->BooleanValue());
// Returns the type string of a value; see ECMA-262, 11.4.3 (p 47).
// Possible optimizations: put the type string into the oddballs.
RUNTIME_FUNCTION(MaybeObject*, Runtime_Typeof) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
Object* obj = args[0];
if (obj->IsNumber()) return isolate->heap()->number_string();
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToNumber) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(String, subject, 0);
subject->TryFlatten();
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewString) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
CONVERT_SMI_ARG_CHECKED(length, 0);
CONVERT_BOOLEAN_ARG_CHECKED(is_one_byte, 1);
if (length == 0) return isolate->heap()->empty_string();
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseInt) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
CONVERT_ARG_CHECKED(String, s, 0);
CONVERT_SMI_ARG_CHECKED(radix, 1);
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseFloat) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
CONVERT_ARG_CHECKED(String, str, 0);
// ECMA-262 section 15.1.2.3, empty string is NaN
Arguments args,
Isolate* isolate,
unibrow::Mapping<typename ConvertTraits::UnibrowConverter, 128>* mapping) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
CONVERT_ARG_CHECKED(String, s, 0);
s = s->TryFlattenGetString();
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringTrim) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(String, s, 0);
const uint8_t* chars,
FixedArray* elements,
int length) {
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
FixedArray* ascii_cache = heap->single_character_string_cache();
Object* undefined = heap->undefined_value();
int i;
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
elements = Handle<FixedArray>(FixedArray::cast(obj), isolate);
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
String::FlatContent content = s->GetFlatContent();
if (content.IsAscii()) {
Vector<const uint8_t> chars = content.ToOneByteVector();
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStringWrapper) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(String, value, 0);
return value->ToObject();
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToString) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* number = args[0];
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToStringSkipCache) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* number = args[0];
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToInteger) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(number, 0);
// ES6 draft 9.1.11
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToPositiveInteger) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(number, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToIntegerMapMinusZero) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(number, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSUint32) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_NUMBER_CHECKED(int32_t, number, Uint32, args[0]);
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSInt32) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(number, 0);
// Converts a Number to a Smi, if possible. Returns NaN if the number is not
// a small integer.
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToSmi) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* obj = args[0];
RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateHeapNumber) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
return isolate->heap()->AllocateHeapNumber(0);
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAdd) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSub) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMul) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberUnaryMinus) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAlloc) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
return isolate->heap()->NumberFromDouble(9876543210.0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberDiv) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMod) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberImul) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringAdd) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, str1, 0);
CONVERT_ARG_CHECKED(String, str2, 1);
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSArray, array, 0);
if (!args[1]->IsSmi()) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSArray, array, 0);
if (!args[1]->IsSmi()) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSArray, elements_array, 0);
RUNTIME_ASSERT(elements_array->HasFastSmiOrObjectElements());
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberOr) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAnd) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberXor) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberNot) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShl) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShr) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(uint32_t, x, Uint32, args[0]);
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSar) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberEquals) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringEquals) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, x, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberCompare) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
// Compare two Smis as if they were converted to strings and then
// compared lexicographically.
RUNTIME_FUNCTION(MaybeObject*, Runtime_SmiLexicographicCompare) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(x_value, 0);
CONVERT_SMI_ARG_CHECKED(y_value, 1);
equal_prefix_result = Smi::FromInt(LESS);
}
int r;
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
String::FlatContent x_content = x->GetFlatContent();
String::FlatContent y_content = y->GetFlatContent();
if (x_content.IsAscii()) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCompare) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, x, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_acos) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_acos()->Increment();
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_asin) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_asin()->Increment();
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_atan()->Increment();
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan2) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
isolate->counters()->math_atan2()->Increment();
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_ceil) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_ceil()->Increment();
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_cos) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_cos()->Increment();
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_exp) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_exp()->Increment();
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_floor) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_floor()->Increment();
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_log) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_log()->Increment();
// Slow version of Math.pow. We check for fast paths for special cases.
// Used if SSE2/VFP3 is not available.
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
isolate->counters()->math_pow()->Increment();
// Fast version of Math.pow if we know that y is not an integer and y is not
// -0.5 or 0.5. Used as slow case from full codegen.
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
isolate->counters()->math_pow()->Increment();
RUNTIME_FUNCTION(MaybeObject*, Runtime_RoundNumber) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_round()->Increment();
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sin) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_sin()->Increment();
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sqrt) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_sqrt()->Increment();
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_tan) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_tan()->Increment();
RUNTIME_FUNCTION(MaybeObject*, Runtime_DateMakeDay) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(year, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStrictArgumentsFast) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
JSFunction* callee = JSFunction::cast(args[0]);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
FixedArray* array = reinterpret_cast<FixedArray*>(obj);
array->set_map_no_write_barrier(isolate->heap()->fixed_array_map());
array->set_length(length);
HandleScope scope(isolate);
ASSERT(args.length() == 0);
Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
- ASSERT(isolate->heap()->IsAllocationAllowed());
+ ASSERT(AllowHeapAllocation::IsAllowed());
delete deoptimizer;
return isolate->heap()->undefined_value();
}
Deoptimizer::BailoutType type =
static_cast<Deoptimizer::BailoutType>(args.smi_at(0));
Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
- ASSERT(isolate->heap()->IsAllocationAllowed());
+ ASSERT(AllowHeapAllocation::IsAllowed());
ASSERT(deoptimizer->compiled_code_kind() == Code::OPTIMIZED_FUNCTION);
RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyOSR) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
delete deoptimizer;
return isolate->heap()->undefined_value();
RUNTIME_FUNCTION(MaybeObject*, Runtime_RunningInSimulator) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
#if defined(USE_SIMULATOR)
return isolate->heap()->true_value();
#else
RUNTIME_FUNCTION(MaybeObject*, Runtime_CheckIsBootstrapping) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetRootNaN) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
return isolate->heap()->nan_value();
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewGlobalContext) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewFunctionContext) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushWithContext) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
JSObject* extension_object;
if (args[0]->IsJSObject()) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushCatchContext) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
String* name = String::cast(args[0]);
Object* thrown_object = args[1];
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushBlockContext) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
ScopeInfo* scope_info = ScopeInfo::cast(args[0]);
JSFunction* function;
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSModule) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* obj = args[0];
return isolate->heap()->ToBoolean(obj->IsJSModule());
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushModuleContext) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(index, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_PromoteScheduledException) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT_EQ(0, args.length());
return isolate->PromoteScheduledException();
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_StackGuard) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
// First check if this is a real stack overflow.
if (isolate->stack_guard()->IsStackOverflow()) {
- NoHandleAllocation na(isolate);
+ SealHandleScope shs(isolate);
return isolate->StackOverflow();
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_Interrupt) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
return Execution::HandleStackGuardInterrupt(isolate);
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceEnter) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
PrintTransition(isolate, NULL);
return isolate->heap()->undefined_value();
RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceExit) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
PrintTransition(isolate, args[0]);
return args[0]; // return TOS
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrint) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
#ifdef DEBUG
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugTrace) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
isolate->PrintStack(stdout);
return isolate->heap()->undefined_value();
RUNTIME_FUNCTION(MaybeObject*, Runtime_DateCurrentTime) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
// According to ECMA-262, section 15.9.1, page 117, the precision of
if (maybe_result_array->IsFailure()) return maybe_result_array;
RUNTIME_ASSERT(output->HasFastObjectElements());
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
FixedArray* output_array = FixedArray::cast(output->elements());
RUNTIME_ASSERT(output_array->length() >= DateParser::OUTPUT_SIZE);
RUNTIME_FUNCTION(MaybeObject*, Runtime_DateLocalTimezone) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_DateToUTC) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalReceiver) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* global = args[0];
if (!global->IsJSGlobalObject()) return isolate->heap()->null_value();
// Allocate a block of memory in the given space (filled with a filler).
// Use as fallback for allocation in generated code when the space
// is full.
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
RUNTIME_ASSERT(IsAligned(size, kPointerSize));
RUNTIME_ASSERT(size > 0);
Heap* heap = isolate->heap();
RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInNewSpace) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Smi, size_smi, 0);
return Allocate(isolate, size_smi->value(), NEW_SPACE);
RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInOldPointerSpace) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Smi, size_smi, 0);
return Allocate(isolate, size_smi->value(), OLD_POINTER_SPACE);
RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInOldDataSpace) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Smi, size_smi, 0);
return Allocate(isolate, size_smi->value(), OLD_DATA_SPACE);
// array. Returns true if the element was pushed on the stack and
// false otherwise.
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushIfAbsent) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSArray, array, 0);
CONVERT_ARG_CHECKED(JSReceiver, element, 1);
// This will not allocate (flatten the string), but it may run
// very slowly for very deeply nested ConsStrings. For debugging use only.
RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalPrint) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(String, string, 0);
// property.
// Returns the number of non-undefined elements collected.
RUNTIME_FUNCTION(MaybeObject*, Runtime_RemoveArrayHoles) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSObject, object, 0);
CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
// Move contents of argument 0 (an array) to argument 1 (an array)
RUNTIME_FUNCTION(MaybeObject*, Runtime_MoveArrayContents) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSArray, from, 0);
CONVERT_ARG_CHECKED(JSArray, to, 1);
// How many elements does this object/array have?
RUNTIME_FUNCTION(MaybeObject*, Runtime_EstimateNumberOfElements) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSObject, object, 0);
HeapObject* elements = object->elements();
RUNTIME_FUNCTION(MaybeObject*, Runtime_LookupAccessor) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
CONVERT_ARG_CHECKED(Name, name, 1);
#ifdef ENABLE_DEBUGGER_SUPPORT
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugBreak) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
return Execution::DebugBreakHelper();
}
// clearing the event listener function
// args[1]: object supplied during callback
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDebugEventListener) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
RUNTIME_ASSERT(args[0]->IsJSFunction() ||
args[0]->IsUndefined() ||
RUNTIME_FUNCTION(MaybeObject*, Runtime_Break) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
isolate->stack_guard()->DebugBreak();
return isolate->heap()->undefined_value();
// Return the property type calculated from the property details.
// args[0]: smi with property details.
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyTypeFromDetails) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
return Smi::FromInt(static_cast<int>(details.type()));
// Return the property attribute calculated from the property details.
// args[0]: smi with property details.
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyAttributesFromDetails) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
return Smi::FromInt(static_cast<int>(details.attributes()));
// Return the property insertion index calculated from the property details.
// args[0]: smi with property details.
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyIndexFromDetails) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
// TODO(verwaest): Depends on the type of details.
RUNTIME_FUNCTION(MaybeObject*, Runtime_CheckExecutionState) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() >= 1);
CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
// Check that the break id is valid.
FixedArray* instances, int instances_size,
JSFunction* arguments_function) {
Isolate* isolate = target->GetIsolate();
- NoHandleAllocation ha(isolate);
- AssertNoAllocation no_alloc;
+ SealHandleScope shs(isolate);
+ DisallowHeapAllocation no_allocation;
// Iterate the heap.
int count = 0;
// args[1]: constructor function for instances to exclude (Mirror)
// args[2]: the the maximum number of objects to return
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 3);
// First perform a full GC in order to avoid references from dead objects.
int max_references,
FixedArray* instances,
int instances_size) {
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_allocation;
// Iterate the heap.
int count = 0;
// args[0]: the constructor to find instances of
// args[1]: the the maximum number of objects to return
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
// First perform a full GC in order to avoid dead objects.
// Find the effective prototype object as returned by __proto__.
// args[0]: the object to find the prototype for.
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPrototype) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSObject, obj, 0);
return GetPrototypeSkipHiddenPrototypes(isolate, obj);
RUNTIME_FUNCTION(MaybeObject*, Runtime_SystemBreak) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
CPU::DebugBreak();
return isolate->heap()->undefined_value();
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetInferredName) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
static int FindSharedFunctionInfosForScript(HeapIterator* iterator,
Script* script,
FixedArray* buffer) {
- AssertNoAllocation no_allocations;
+ DisallowHeapAllocation no_allocation;
int counter = 0;
int buffer_size = buffer->length();
for (HeapObject* obj = iterator->next();
Heap* heap = isolate->heap();
{
heap->EnsureHeapIsIterable();
- AssertNoAllocation no_allocations;
+ DisallowHeapAllocation no_allocation;
HeapIterator heap_iterator(heap);
Script* scr = *script;
FixedArray* arr = *array;
if (number > kBufferSize) {
array = isolate->factory()->NewFixedArray(number);
heap->EnsureHeapIsIterable();
- AssertNoAllocation no_allocations;
+ DisallowHeapAllocation no_allocation;
HeapIterator heap_iterator(heap);
Script* scr = *script;
FixedArray* arr = *array;
// Sets a v8 flag.
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFlags) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
CONVERT_ARG_CHECKED(String, arg, 0);
SmartArrayPointer<char> flags =
arg->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
// Performs a GC.
// Presently, it only does a full GC.
RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectGarbage) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags, "%CollectGarbage");
return isolate->heap()->undefined_value();
}
// Gets the current heap usage.
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHeapUsage) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
int usage = static_cast<int>(isolate->heap()->SizeOfObjects());
if (!Smi::IsValid(usage)) {
return *isolate->factory()->NewNumberFromInt(usage);
RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerResume) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
v8::V8::ResumeProfiler();
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerPause) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
v8::V8::PauseProfiler();
return isolate->heap()->undefined_value();
}
Handle<Script> script;
Heap* heap = script_name->GetHeap();
heap->EnsureHeapIsIterable();
- AssertNoAllocation no_allocation_during_heap_iteration;
+ DisallowHeapAllocation no_allocation_during_heap_iteration;
HeapIterator iterator(heap);
HeapObject* obj = NULL;
while (script.is_null() && ((obj = iterator.next()) != NULL)) {
// Returns V8 version as a string.
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetV8Version) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT_EQ(args.length(), 0);
const char* version_string = v8::V8::GetVersion();
RUNTIME_FUNCTION(MaybeObject*, Runtime_Abort) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
OS::PrintError("abort: %s\n",
reinterpret_cast<char*>(args[0]) + args.smi_at(1));
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
// This is only called from codegen, so checks might be more lax.
CONVERT_ARG_CHECKED(JSFunctionResultCache, cache, 0);
Object* key = args[1];
RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetStartPosition) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
return Smi::FromInt(message->start_position());
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetScript) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
return message->script();
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_Log) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, format, 0);
CONVERT_ARG_CHECKED(JSArray, elms, 1);
- AssertNoAllocation no_gc;
+ DisallowHeapAllocation no_gc;
String::FlatContent format_content = format->GetFlatContent();
RUNTIME_ASSERT(format_content.IsAscii());
Vector<const uint8_t> chars = format_content.ToOneByteVector();
RUNTIME_FUNCTION(MaybeObject*, Runtime_HaveSameMap) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSObject, obj1, 0);
CONVERT_ARG_CHECKED(JSObject, obj2, 1);
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsObserved) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
if (!args[0]->IsJSReceiver()) return isolate->heap()->false_value();
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetIsObserved) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSReceiver, obj, 0);
CONVERT_BOOLEAN_ARG_CHECKED(is_observed, 1);
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetObserverDeliveryPending) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
isolate->set_observer_delivery_pending(true);
return isolate->heap()->undefined_value();
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetObservationState) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 0);
return isolate->heap()->observation_state();
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_UnwrapGlobalProxy) {
- NoHandleAllocation ha(isolate);
+ SealHandleScope shs(isolate);
ASSERT(args.length() == 1);
Object* object = args[0];
if (object->IsJSGlobalProxy()) {
static void PrintBits(uint8_t byte, int digits);
- AssertNoAllocation no_allocation_;
+ DisallowHeapAllocation no_allocation_;
Code* code_;
unsigned length_;
unsigned entry_size_;
class SerializationAddressMapper {
public:
SerializationAddressMapper()
- : serialization_map_(new HashMap(&SerializationMatchFun)),
- no_allocation_(new AssertNoAllocation()) { }
+ : no_allocation_(),
+ serialization_map_(new HashMap(&SerializationMatchFun)) { }
~SerializationAddressMapper() {
delete serialization_map_;
- delete no_allocation_;
}
bool IsMapped(HeapObject* obj) {
return reinterpret_cast<void*>(v);
}
+ DisallowHeapAllocation no_allocation_;
HashMap* serialization_map_;
- AssertNoAllocation* no_allocation_;
DISALLOW_COPY_AND_ASSIGN(SerializationAddressMapper);
};
void SweeperThread::Run() {
Isolate::SetIsolateThreadLocals(isolate_, NULL);
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
+
while (true) {
start_sweeping_semaphore_->Wait();
// dictionary (possibly triggering GC), and finally we relocate the collected
// infos before we process them.
void TypeFeedbackOracle::BuildDictionary(Handle<Code> code) {
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
ZoneList<RelocInfo> infos(16, zone());
HandleScope scope(isolate_);
GetRelocInfos(code, &infos);
void TypeFeedbackOracle::CreateDictionary(Handle<Code> code,
ZoneList<RelocInfo>* infos) {
- DisableAssertNoAllocation allocation_allowed;
+ AllowHeapAllocation allocation_allowed;
int cell_count = code->type_feedback_info()->IsTypeFeedbackInfo()
? TypeFeedbackInfo::cast(code->type_feedback_info())->
type_feedback_cells()->CellCount()
template<typename Char>
Handle<String> URIUnescape::Unescape(Isolate* isolate, Handle<String> source) {
int index;
- { AssertNoAllocation no_allocation;
+ { DisallowHeapAllocation no_allocation;
StringSearch<uint8_t, Char> search(isolate, STATIC_ASCII_VECTOR("%"));
index = search.Search(GetCharVector<Char>(source), 0);
if (index < 0) return source;
int length = string->length();
int unescaped_length = 0;
- { AssertNoAllocation no_allocation;
+ { DisallowHeapAllocation no_allocation;
Vector<const Char> vector = GetCharVector<Char>(string);
for (int i = start_index; i < length; unescaped_length++) {
int step;
if (one_byte) {
Handle<SeqOneByteString> dest =
isolate->factory()->NewRawOneByteString(unescaped_length);
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
Vector<const Char> vector = GetCharVector<Char>(string);
for (int i = start_index; i < length; dest_position++) {
int step;
} else {
Handle<SeqTwoByteString> dest =
isolate->factory()->NewRawTwoByteString(unescaped_length);
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
Vector<const Char> vector = GetCharVector<Char>(string);
for (int i = start_index; i < length; dest_position++) {
int step;
int escaped_length = 0;
int length = string->length();
- { AssertNoAllocation no_allocation;
+ { DisallowHeapAllocation no_allocation;
Vector<const Char> vector = GetCharVector<Char>(string);
for (int i = 0; i < length; i++) {
uint16_t c = vector[i];
isolate->factory()->NewRawOneByteString(escaped_length);
int dest_position = 0;
- { AssertNoAllocation no_allocation;
+ { DisallowHeapAllocation no_allocation;
Vector<const Char> vector = GetCharVector<Char>(string);
for (int i = 0; i < length; i++) {
uint16_t c = vector[i];
#include "v8globals.h"
#include "v8checks.h"
#include "allocation.h"
+#include "assert-scope.h"
#include "v8utils.h"
#include "flags.h"
// Converts a number into size_t.
inline size_t NumberToSize(Isolate* isolate,
Object* number) {
- NoHandleAllocation hc(isolate);
+ SealHandleScope shs(isolate);
if (number->IsSmi()) {
return Smi::cast(number)->value();
} else {
class Allocation;
class Arguments;
class Assembler;
-class AssertNoAllocation;
class Code;
class CodeGenerator;
class CodeStub;
void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
- ALLOW_HANDLE_DEREF(isolate(), "using and embedding raw address");
+ AllowDeferredHandleDereference using_raw_address;
// If there is no relocation info, emit the value of the handle efficiently
// (possibly using less that 8 bytes for the value).
if (RelocInfo::IsNone(mode)) {
JSFunction* function) {
Isolate* isolate = function->GetIsolate();
HandleScope scope(isolate);
- AssertNoAllocation no_allocation;
+ DisallowHeapAllocation no_allocation;
ASSERT(function->IsOptimized());
ASSERT(function->FunctionsInFunctionListShareSameCode());
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- { ALLOW_HANDLE_DEREF(isolate(),
- "copying a ZoneList of handles into a FixedArray");
+ { AllowDeferredHandleDereference copy_handles;
for (int i = 0; i < deoptimization_literals_.length(); i++) {
literals->set(i, *deoptimization_literals_[i]);
}
void LCodeGen::DoConstantT(LConstantT* instr) {
Handle<Object> value = instr->value();
- ALLOW_HANDLE_DEREF(isolate(), "smi check");
+ AllowDeferredHandleDereference smi_check;
if (value->IsSmi()) {
__ Move(ToRegister(instr->result()), value);
} else {
ASSERT(!operand->IsDoubleRegister());
if (operand->IsConstantOperand()) {
Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
- ALLOW_HANDLE_DEREF(isolate(), "smi check");
+ AllowDeferredHandleDereference smi_check;
if (object->IsSmi()) {
__ Push(Handle<Smi>::cast(object));
} else {
void MacroAssembler::Move(Register dst, Handle<Object> source) {
- ALLOW_HANDLE_DEREF(isolate(), "smi check");
+ AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
Move(dst, Smi::cast(*source));
} else {
void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
- ALLOW_HANDLE_DEREF(isolate(), "smi check");
+ AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
Move(dst, Smi::cast(*source));
} else {
void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
- ALLOW_HANDLE_DEREF(isolate(), "smi check");
+ AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
Cmp(dst, Smi::cast(*source));
} else {
void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
- ALLOW_HANDLE_DEREF(isolate(), "smi check");
+ AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
Cmp(dst, Smi::cast(*source));
} else {
void MacroAssembler::Push(Handle<Object> source) {
- ALLOW_HANDLE_DEREF(isolate(), "smi check");
+ AllowDeferredHandleDereference smi_check;
if (source->IsSmi()) {
Push(Smi::cast(*source));
} else {
void MacroAssembler::LoadHeapObject(Register result,
Handle<HeapObject> object) {
- ALLOW_HANDLE_DEREF(isolate(), "using raw address");
+ AllowDeferredHandleDereference using_raw_address;
if (isolate()->heap()->InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(object);
void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
- ALLOW_HANDLE_DEREF(isolate(), "using raw address");
+ AllowDeferredHandleDereference using_raw_address;
if (isolate()->heap()->InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(object);
void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
- ALLOW_HANDLE_DEREF(isolate(), "using raw address");
+ AllowDeferredHandleDereference using_raw_address;
if (isolate()->heap()->InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(object);
void MacroAssembler::LoadGlobalCell(Register dst,
Handle<JSGlobalPropertyCell> cell) {
if (dst.is(rax)) {
- ALLOW_HANDLE_DEREF(isolate(), "embedding raw address");
+ AllowDeferredHandleDereference embedding_raw_address;
load_rax(cell.location(), RelocInfo::GLOBAL_PROPERTY_CELL);
} else {
movq(dst, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
void PushHeapObject(Handle<HeapObject> object);
void LoadObject(Register result, Handle<Object> object) {
- ALLOW_HANDLE_DEREF(isolate(), "heap object check");
+ AllowDeferredHandleDereference heap_object_check;
if (object->IsHeapObject()) {
LoadHeapObject(result, Handle<HeapObject>::cast(object));
} else {
}
void CmpObject(Register reg, Handle<Object> object) {
- ALLOW_HANDLE_DEREF(isolate(), "heap object check");
+ AllowDeferredHandleDereference heap_object_check;
if (object->IsHeapObject()) {
CmpHeapObject(reg, Handle<HeapObject>::cast(object));
} else {
# Deferred stack trace formatting is temporarily disabled.
test-heap/ReleaseStackTraceData: PASS || FAIL
+# Boot up memory use is bloated in debug mode.
+test-mark-compact/BootUpMemoryUse: PASS, PASS || FAIL if $mode == debug
+
##############################################################################
[ $arch == arm ]
*v8::Handle<v8::Function>::Cast(
v8::Context::GetCurrent()->Global()->Get(v8_str("g"))));
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_allocation;
g->shared()->PrintLn();
}
#endif // OBJECT_PRINT
void AccumulateStats(Handle<String> cons_string, ConsStringStats* stats) {
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_allocation;
if (cons_string->IsConsString()) {
return AccumulateStats(ConsString::cast(*cons_string), stats);
}
Handle<String> cons_string = build(i, &data);
ConsStringStats cons_string_stats;
AccumulateStats(cons_string, &cons_string_stats);
- AssertNoAllocation no_alloc;
+ DisallowHeapAllocation no_allocation;
PrintStats(data);
// Full verify of cons string.
cons_string_stats.VerifyEqual(flat_string_stats);
'../../src/arguments.h',
'../../src/assembler.cc',
'../../src/assembler.h',
+ '../../src/assert-scope.h',
'../../src/ast.cc',
'../../src/ast.h',
'../../src/atomicops.h',