#include "src/builtins.h"
#include "src/cpu-profiler.h"
#include "src/gdb-jit.h"
-#include "src/ic-inl.h"
+#include "src/heap/mark-compact.h"
#include "src/heap-profiler.h"
-#include "src/mark-compact.h"
+#include "src/ic-inl.h"
+#include "src/prototype.h"
#include "src/stub-cache.h"
#include "src/vm-state-inl.h"
: Arguments(length, arguments) { }
Object*& operator[] (int index) {
- ASSERT(index < length());
+ DCHECK(index < length());
return Arguments::operator[](index);
}
template <class S> Handle<S> at(int index) {
- ASSERT(index < length());
+ DCHECK(index < length());
return Arguments::at<S>(index);
}
#ifdef DEBUG
void Verify() {
// Check we have at least the receiver.
- ASSERT(Arguments::length() >= 1);
+ DCHECK(Arguments::length() >= 1);
}
#endif
};
template <>
void BuiltinArguments<NEEDS_CALLED_FUNCTION>::Verify() {
// Check we have at least the receiver and the called function.
- ASSERT(Arguments::length() >= 2);
+ DCHECK(Arguments::length() >= 2);
// Make sure cast to JSFunction succeeds.
called_function();
}
// Calculate the result using a full stack frame iterator and check
// that the state of the stack is as we assume it to be in the
// code below.
StackFrameIterator it(isolate);
- ASSERT(it.frame()->is_exit());
+ DCHECK(it.frame()->is_exit());
it.Advance();
StackFrame* frame = it.frame();
bool reference_result = frame->is_construct();
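+ // The iterator-based answer above serves only as a debug-mode reference;
+ // below, the same result is read cheaply from the construct marker slot of
+ // the caller's frame and cross-checked against it.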
const Smi* kConstructMarker = Smi::FromInt(StackFrame::CONSTRUCT);
Object* marker = Memory::Object_at(caller_fp + kMarkerOffset);
bool result = (marker == kConstructMarker);
- ASSERT_EQ(result, reference_result);
+ DCHECK_EQ(result, reference_result);
return result;
}
#endif
}
-static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
- FixedArrayBase* elms,
- int to_trim) {
- ASSERT(heap->CanMoveObjectStart(elms));
-
- Map* map = elms->map();
- int entry_size;
- if (elms->IsFixedArray()) {
- entry_size = kPointerSize;
- } else {
- entry_size = kDoubleSize;
- }
- ASSERT(elms->map() != heap->fixed_cow_array_map());
- // For now this trick is only applied to fixed arrays in new and paged space.
- // In large object space the object's start must coincide with chunk
- // and thus the trick is just not applicable.
- ASSERT(!heap->lo_space()->Contains(elms));
-
- STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
- STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
- STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
-
- Object** former_start = HeapObject::RawField(elms, 0);
-
- const int len = elms->length();
-
- if (to_trim * entry_size > FixedArrayBase::kHeaderSize &&
- elms->IsFixedArray() &&
- !heap->new_space()->Contains(elms)) {
- // If we are doing a big trim in old space then we zap the space that was
- // formerly part of the array so that the GC (aided by the card-based
- // remembered set) won't find pointers to new-space there.
- Object** zap = reinterpret_cast<Object**>(elms->address());
- zap++; // Header of filler must be at least one word so skip that.
- for (int i = 1; i < to_trim; i++) {
- *zap++ = Smi::FromInt(0);
- }
- }
- // Technically in new space this write might be omitted (except for
- // debug mode which iterates through the heap), but to play safer
- // we still do it.
- // Since left trimming is only performed on pages which are not concurrently
- // swept creating a filler object does not require synchronization.
- heap->CreateFillerObjectAt(elms->address(), to_trim * entry_size);
-
- int new_start_index = to_trim * (entry_size / kPointerSize);
- former_start[new_start_index] = map;
- former_start[new_start_index + 1] = Smi::FromInt(len - to_trim);
-
- // Maintain marking consistency for HeapObjectIterator and
- // IncrementalMarking.
- int size_delta = to_trim * entry_size;
- Address new_start = elms->address() + size_delta;
- heap->marking()->TransferMark(elms->address(), new_start);
- heap->AdjustLiveBytes(new_start, -size_delta, Heap::FROM_MUTATOR);
-
- FixedArrayBase* new_elms =
- FixedArrayBase::cast(HeapObject::FromAddress(new_start));
- HeapProfiler* profiler = heap->isolate()->heap_profiler();
- if (profiler->is_tracking_object_moves()) {
- profiler->ObjectMoveEvent(elms->address(),
- new_elms->address(),
- new_elms->Size());
- }
- return new_elms;
-}
-
-
static bool ArrayPrototypeHasNoElements(Heap* heap,
Context* native_context,
JSObject* array_proto) {
// This method depends on non-writability of Object and Array prototype
// fields.
if (array_proto->elements() != heap->empty_fixed_array()) return false;
// Object.prototype
- Object* proto = array_proto->GetPrototype();
- if (proto == heap->null_value()) return false;
- array_proto = JSObject::cast(proto);
+ PrototypeIterator iter(heap->isolate(), array_proto);
+ if (iter.IsAtEnd()) {
+ return false;
+ }
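+ // IsAtEnd() above rules out a null prototype; the checks below verify that
+ // the chain is exactly Array.prototype -> Object.prototype and nothing else.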
+ array_proto = JSObject::cast(iter.GetCurrent());
if (array_proto != native_context->initial_object_prototype()) return false;
if (array_proto->elements() != heap->empty_fixed_array()) return false;
- return array_proto->GetPrototype()->IsNull();
+ iter.Advance();
+ return iter.IsAtEnd();
}
if (first_added_arg >= args_length) return handle(array->elements(), isolate);
ElementsKind origin_kind = array->map()->elements_kind();
- ASSERT(!IsFastObjectElementsKind(origin_kind));
+ DCHECK(!IsFastObjectElementsKind(origin_kind));
ElementsKind target_kind = origin_kind;
{
DisallowHeapAllocation no_gc;
Context* native_context = heap->isolate()->context()->native_context();
JSObject* array_proto =
JSObject::cast(native_context->array_function()->prototype());
- return receiver->GetPrototype() == array_proto &&
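+ // The fast path applies only when the receiver's immediate prototype is
+ // the unmodified Array.prototype.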
+ PrototypeIterator iter(heap->isolate(), receiver);
+ return iter.GetCurrent() == array_proto &&
ArrayPrototypeHasNoElements(heap, native_context, array_proto);
}
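+ // Growing past a read-only length needs the full spec behavior, so defer
+ // to the JS implementation.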
if (to_add > 0 && JSArray::WouldChangeReadOnlyLength(array, len + to_add)) {
return CallJsBuiltin(isolate, "ArrayPush", args);
}
- ASSERT(!array->map()->is_observed());
+ DCHECK(!array->map()->is_observed());
ElementsKind kind = array->GetElementsKind();
}
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
- ASSERT(to_add <= (Smi::kMaxValue - len));
+ DCHECK(to_add <= (Smi::kMaxValue - len));
int new_length = len + to_add;
}
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
- ASSERT(to_add <= (Smi::kMaxValue - len));
+ DCHECK(to_add <= (Smi::kMaxValue - len));
int new_length = len + to_add;
}
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
- ASSERT(!array->map()->is_observed());
+ DCHECK(!array->map()->is_observed());
int len = Smi::cast(array->length())->value();
if (len == 0) return isolate->heap()->undefined_value();
return CallJsBuiltin(isolate, "ArrayShift", args);
}
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
- ASSERT(!array->map()->is_observed());
+ DCHECK(!array->map()->is_observed());
int len = Smi::cast(array->length())->value();
if (len == 0) return heap->undefined_value();
}
if (heap->CanMoveObjectStart(*elms_obj)) {
- array->set_elements(LeftTrimFixedArray(heap, *elms_obj, 1));
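+ // Fast path: advance the array's start in place (a filler object covers
+ // the vacated word and the header is rewritten at the new address) rather
+ // than copying the remaining elements.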
+ array->set_elements(heap->LeftTrimFixedArray(*elms_obj, 1));
} else {
// Shift the elements.
if (elms_obj->IsFixedArray()) {
return CallJsBuiltin(isolate, "ArrayUnshift", args);
}
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
- ASSERT(!array->map()->is_observed());
+ DCHECK(!array->map()->is_observed());
if (!array->HasFastSmiOrObjectElements()) {
return CallJsBuiltin(isolate, "ArrayUnshift", args);
}
int new_length = len + to_add;
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
- ASSERT(to_add <= (Smi::kMaxValue - len));
+ DCHECK(to_add <= (Smi::kMaxValue - len));
if (to_add > 0 && JSArray::WouldChangeReadOnlyLength(array, len + to_add)) {
return CallJsBuiltin(isolate, "ArrayUnshift", args);
} else {
// Array.slice(arguments, ...) is quite a common idiom (notably more
// than 50% of invocations in Web apps). Treat it in C++ as well.
- Map* arguments_map = isolate->context()->native_context()->
- sloppy_arguments_boilerplate()->map();
+ Map* arguments_map =
+ isolate->context()->native_context()->sloppy_arguments_map();
bool is_arguments_object_with_fast_elements =
receiver->IsJSObject() &&
}
}
- ASSERT(len >= 0);
+ DCHECK(len >= 0);
int n_arguments = args.length() - 1;
// Note carefully chosen defaults---if argument is missing,
return CallJsBuiltin(isolate, "ArraySplice", args);
}
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
- ASSERT(!array->map()->is_observed());
+ DCHECK(!array->map()->is_observed());
int len = Smi::cast(array->length())->value();
// This does not follow ECMA-262, but we do the same for
// compatibility.
int actual_delete_count;
if (n_arguments == 1) {
- ASSERT(len - actual_start >= 0);
+ DCHECK(len - actual_start >= 0);
actual_delete_count = len - actual_start;
} else {
int value = 0; // ToInteger(undefined) == 0
if (heap->CanMoveObjectStart(*elms_obj)) {
// On the fast path we move the start of the object in memory.
- elms_obj = handle(LeftTrimFixedArray(heap, *elms_obj, delta), isolate);
+ elms_obj = handle(heap->LeftTrimFixedArray(*elms_obj, delta));
} else {
// This is the slow path. We are going to move the elements to the left
// by copying them. For trimmed values we store the hole.
Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
- ASSERT((item_count - actual_delete_count) <= (Smi::kMaxValue - len));
+ DCHECK((item_count - actual_delete_count) <= (Smi::kMaxValue - len));
// Check if the array needs to grow.
if (new_length > elms->length()) {
bool is_holey = false;
for (int i = 0; i < n_arguments; i++) {
Object* arg = args[i];
- if (!arg->IsJSArray() ||
- !JSArray::cast(arg)->HasFastElements() ||
- JSArray::cast(arg)->GetPrototype() != array_proto) {
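+ // Fall back to the JS builtin unless every argument is a fast-elements
+ // JSArray whose immediate prototype is the unmodified Array.prototype.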
+ PrototypeIterator iter(isolate, arg);
+ if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements() ||
+ iter.GetCurrent() != array_proto) {
AllowHeapAllocation allow_allocation;
return CallJsBuiltin(isolate, "ArrayConcatJS", args);
}
STATIC_ASSERT(FixedArray::kMaxLength < kHalfOfMaxInt);
USE(kHalfOfMaxInt);
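+ // Both operands stay below kMaxLength < kHalfOfMaxInt (enforced by the
+ // range check below), so this signed addition cannot overflow.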
result_len += len;
- ASSERT(result_len >= 0);
+ DCHECK(result_len >= 0);
if (result_len > FixedDoubleArray::kMaxLength) {
AllowHeapAllocation allow_allocation;
}
}
- ASSERT(j == result_len);
+ DCHECK(j == result_len);
return *result_array;
}
static inline Object* FindHidden(Heap* heap,
Object* object,
FunctionTemplateInfo* type) {
- if (type->IsTemplateFor(object)) return object;
- Object* proto = object->GetPrototype(heap->isolate());
- if (proto->IsJSObject() &&
- JSObject::cast(proto)->map()->is_hidden_prototype()) {
- return FindHidden(heap, proto, type);
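+ // Walk the chain starting at the receiver itself and stopping at the
+ // first non-hidden prototype, returning the first object the template
+ // fits.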
+ for (PrototypeIterator iter(heap->isolate(), object,
+ PrototypeIterator::START_AT_RECEIVER);
+ !iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN); iter.Advance()) {
+ if (type->IsTemplateFor(iter.GetCurrent())) {
+ return iter.GetCurrent();
+ }
}
return heap->null_value();
}
template <bool is_construct>
MUST_USE_RESULT static Object* HandleApiCallHelper(
BuiltinArguments<NEEDS_CALLED_FUNCTION> args, Isolate* isolate) {
- ASSERT(is_construct == CalledAsConstructor(isolate));
+ DCHECK(is_construct == CalledAsConstructor(isolate));
Heap* heap = isolate->heap();
HandleScope scope(isolate);
Handle<JSFunction> function = args.called_function();
- ASSERT(function->shared()->IsApiFunction());
+ DCHECK(function->shared()->IsApiFunction());
Handle<FunctionTemplateInfo> fun_data(
function->shared()->get_api_func_data(), isolate);
SharedFunctionInfo* shared = function->shared();
if (shared->strict_mode() == SLOPPY && !shared->native()) {
Object* recv = args[0];
- ASSERT(!recv->IsNull());
- if (recv->IsUndefined()) {
- args[0] = function->context()->global_object()->global_receiver();
- }
+ DCHECK(!recv->IsNull());
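+ // For sloppy, non-native functions an undefined receiver is replaced by
+ // the global proxy, matching ordinary call semantics.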
+ if (recv->IsUndefined()) args[0] = function->global_proxy();
}
Object* raw_holder = TypeCheck(heap, args.length(), &args[0], *fun_data);
Object* result;
LOG(isolate, ApiObjectAccess("call", JSObject::cast(*args.receiver())));
- ASSERT(raw_holder->IsJSObject());
+ DCHECK(raw_holder->IsJSObject());
FunctionCallbackArguments custom(isolate,
data_obj,
BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
// Non-functions are never called as constructors. Even if this is an object
// called as a constructor, the delegate call is not a construct call.
- ASSERT(!CalledAsConstructor(isolate));
+ DCHECK(!CalledAsConstructor(isolate));
Heap* heap = isolate->heap();
Handle<Object> receiver = args.receiver();
// Get the invocation callback from the function descriptor that was
// used to create the called object.
- ASSERT(obj->map()->has_instance_call_handler());
+ DCHECK(obj->map()->has_instance_call_handler());
JSFunction* constructor = JSFunction::cast(obj->map()->constructor());
- ASSERT(constructor->shared()->IsApiFunction());
+ DCHECK(constructor->shared()->IsApiFunction());
Object* handler =
constructor->shared()->get_api_func_data()->instance_call_handler();
- ASSERT(!handler->IsUndefined());
+ DCHECK(!handler->IsUndefined());
CallHandlerInfo* call_data = CallHandlerInfo::cast(handler);
Object* callback_obj = call_data->callback();
v8::FunctionCallback callback =
static void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
- LoadStubCompiler::GenerateLoadViaGetterForDeopt(masm);
+ NamedLoadHandlerCompiler::GenerateLoadViaGetterForDeopt(masm);
}
static void Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
- StoreStubCompiler::GenerateStoreViaSetterForDeopt(masm);
+ NamedStoreHandlerCompiler::GenerateStoreViaSetterForDeopt(masm);
}
void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
- ASSERT(!initialized_);
+ DCHECK(!initialized_);
// Create a scope for the handles in the builtins.
HandleScope scope(isolate);
// We pass all arguments to the generator, but it may not use all of
// them. This works because the first arguments are on top of the
// stack.
- ASSERT(!masm.has_frame());
+ DCHECK(!masm.has_frame());
g(&masm, functions[i].name, functions[i].extra_args);
// Move the code into the object heap.
CodeDesc desc;
// Log the event and add the code to the builtins array.
PROFILE(isolate,
CodeCreateEvent(Logger::BUILTIN_TAG, *code, functions[i].s_name));
- GDBJIT(AddCode(GDBJITInterface::BUILTIN, functions[i].s_name, *code));
builtins_[i] = *code;
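+ // Remember the builtin's index in the table on the Code object itself.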
+ if (code->kind() == Code::BUILTIN) code->set_builtin_index(i);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_builtin_code) {
CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
- PrintF(trace_scope.file(), "Builtin: %s\n", functions[i].s_name);
- code->Disassemble(functions[i].s_name, trace_scope.file());
- PrintF(trace_scope.file(), "\n");
+ OFStream os(trace_scope.file());
+ os << "Builtin: " << functions[i].s_name << "\n";
+ code->Disassemble(functions[i].s_name, os);
+ os << "\n";
}
#endif
} else {
void Builtins::Generate_InterruptCheck(MacroAssembler* masm) {
- masm->TailCallRuntime(Runtime::kHiddenInterrupt, 0, 1);
+ masm->TailCallRuntime(Runtime::kInterrupt, 0, 1);
}
void Builtins::Generate_StackCheck(MacroAssembler* masm) {
- masm->TailCallRuntime(Runtime::kHiddenStackGuard, 0, 1);
+ masm->TailCallRuntime(Runtime::kStackGuard, 0, 1);
}