class FunctionTemplate;
class ObjectTemplate;
class Data;
-template<typename T> class FunctionCallbackInfo;
template<typename T> class PropertyCallbackInfo;
class StackTrace;
class StackFrame;
"Only externalized ArrayBuffers can be neutered");
LOG_API(obj->GetIsolate(), "v8::ArrayBuffer::Neuter()");
ENTER_V8(isolate);
-
- for (i::Handle<i::Object> view_obj(obj->weak_first_view(), isolate);
- !view_obj->IsUndefined();) {
- i::Handle<i::JSArrayBufferView> view(i::JSArrayBufferView::cast(*view_obj));
- if (view->IsJSTypedArray()) {
- i::JSTypedArray::cast(*view)->Neuter();
- } else if (view->IsJSDataView()) {
- i::JSDataView::cast(*view)->Neuter();
- } else {
- UNREACHABLE();
- }
- view_obj = i::handle(view->weak_next(), isolate);
- }
- obj->Neuter();
+ i::Runtime::NeuterArrayBuffer(obj);
}
instr->MarkAsCall();
instr = AssignPointerMap(instr);
- if (hinstr->HasObservableSideEffects()) {
- ASSERT(hinstr->next()->IsSimulate());
- HSimulate* sim = HSimulate::cast(hinstr->next());
- ASSERT(instruction_pending_deoptimization_environment_ == NULL);
- ASSERT(pending_deoptimization_ast_id_.IsNone());
- instruction_pending_deoptimization_environment_ = instr;
- pending_deoptimization_ast_id_ = sim->ast_id();
- }
-
// If the instruction does not have side effects, lazy deoptimization
// after the call will try to deoptimize to the point before the call.
// Thus we still need to attach an environment to this call even if
// it has no observable side effects.
instr = AssignEnvironment(instr);
}
chunk_->AddInstruction(instr, current_block_);
+
+ if (instr->IsCall()) {
+ HValue* hydrogen_value_for_lazy_bailout = current;
+ LInstruction* instruction_needing_environment = NULL;
+ if (current->HasObservableSideEffects()) {
+ HSimulate* sim = HSimulate::cast(current->next());
+ instruction_needing_environment = instr;
+ sim->ReplayEnvironment(current_block_->last_environment());
+ hydrogen_value_for_lazy_bailout = sim;
+ }
+ LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
+ bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+ chunk_->AddInstruction(bailout, current_block_);
+ if (instruction_needing_environment != NULL) {
+ // Store the lazy deopt environment with the instruction if needed.
+ // Right now it is only used for LInstanceOfKnownGlobal.
+ instruction_needing_environment->
+ SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
+ }
+ }
}
current_instruction_ = old_current;
}
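// A minimal behavioral sketch of the relocated lazy-bailout point, assuming
// d8 with --allow-natives-syntax: an explicit LLazyBailout now follows every
// call instruction, so optimized code that is marked for lazy deoptimization
// while a call is on the stack bails out after the call, not before it.
function callee() { return 1; }
function caller() { return callee() + 1; }
caller();
caller();
%OptimizeFunctionOnNextCall(caller);
caller();
%DeoptimizeFunction(caller);  // deopts at the bailout point after the call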
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
instr->ReplayEnvironment(current_block_->last_environment());
-
- // If there is an instruction pending deoptimization environment create a
- // lazy bailout instruction to capture the environment.
- if (pending_deoptimization_ast_id_ == instr->ast_id()) {
- LInstruction* result = new(zone()) LLazyBailout;
- result = AssignEnvironment(result);
- // Store the lazy deopt environment with the instruction if needed. Right
- // now it is only used for LInstanceOfKnownGlobal.
- instruction_pending_deoptimization_environment_->
- SetDeferredLazyDeoptimizationEnvironment(result->environment());
- instruction_pending_deoptimization_environment_ = NULL;
- pending_deoptimization_ast_id_ = BailoutId::None();
- return result;
- }
-
return NULL;
}
}
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (!instr->IsLazyBailout() && !instr->IsGap()) {
+ safepoints_.BumpLastLazySafepointIndex();
+ }
+}
+
+
bool LCodeGen::GenerateDeferredCode() {
ASSERT(is_generating());
if (deferred_.length() > 0) {
// is in the correct position.
Assembler::BlockConstPoolScope block_const_pool(masm());
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- __ nop(); // Signals no inlined code.
}
this, Safepoint::kWithRegistersAndDoubles);
__ Move(r0, object_reg);
__ Move(r1, to_map);
- TransitionElementsKindStub stub(from_kind, to_kind);
+ bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+ TransitionElementsKindStub stub(from_kind, to_kind, is_js_array);
__ CallStub(&stub);
RecordSafepointWithRegistersAndDoubles(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
// Code generation passes. Returns true if code generation should
// continue.
+ void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
bool GeneratePrologue();
bool GenerateDeferredCode();
bool GenerateDeoptJumpTable();
}
var relativeStart = TO_INTEGER(start);
+ if (!IS_UNDEFINED(end)) {
+ end = TO_INTEGER(end);
+ }
var first;
var byte_length = %ArrayBufferGetByteLength(this);
if (relativeStart < 0) {
} else {
first = MathMin(relativeStart, byte_length);
}
- var relativeEnd = IS_UNDEFINED(end) ? byte_length : TO_INTEGER(end);
+ var relativeEnd = IS_UNDEFINED(end) ? byte_length : end;
var fin;
if (relativeEnd < 0) {
fin = MathMax(byte_length + relativeEnd, 0);
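// A small sketch of the conversion ordering this hunk establishes, assuming
// a JS shell: both TO_INTEGER conversions now run before byte_length is
// read, so a valueOf that neuters the buffer mid-slice (see the regression
// test near the end of this patch) cannot invalidate a length that was
// already read.
var order = [];
var buf = new ArrayBuffer(8);
buf.slice({valueOf: function() { order.push("start"); return 0; }},
          {valueOf: function() { order.push("end"); return 8; }});
// order is ["start", "end"]: user-visible conversions precede the length read.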
GetParameter(1),
casted_stub()->from_kind(),
casted_stub()->to_kind(),
- true);
+ casted_stub()->is_js_array());
return GetParameter(0);
}
class TransitionElementsKindStub : public HydrogenCodeStub {
public:
TransitionElementsKindStub(ElementsKind from_kind,
- ElementsKind to_kind) {
+ ElementsKind to_kind,
+ bool is_js_array) {
bit_field_ = FromKindBits::encode(from_kind) |
- ToKindBits::encode(to_kind);
+ ToKindBits::encode(to_kind) |
+ IsJSArrayBits::encode(is_js_array);
}
ElementsKind from_kind() const {
return FromKindBits::decode(bit_field_);
}
+ bool is_js_array() const {
+ return IsJSArrayBits::decode(bit_field_);
+ }
+
virtual Handle<Code> GenerateCode(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
private:
class FromKindBits: public BitField<ElementsKind, 8, 8> {};
class ToKindBits: public BitField<ElementsKind, 0, 8> {};
+ class IsJSArrayBits: public BitField<bool, 16, 1> {};
uint32_t bit_field_;
Major MajorKey() { return TransitionElementsKind; }
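// A short sketch of the transitions this stub performs, assuming d8: for a
// JSArray the element count to copy comes from the array's length rather
// than the backing store's capacity, which is why the stub now encodes
// whether the receiver is a JSArray instead of hard-coding true.
var a = [1, 2, 3];  // fast SMI elements
a[0] = 1.5;         // transitions to fast double elements
a[1] = "x";         // transitions again, to fast tagged elements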
element = next;
}
+#ifdef DEBUG
+ // Make sure all activations of optimized code can deopt at their current PC.
+ for (StackFrameIterator it(isolate, isolate->thread_local_top());
+ !it.done(); it.Advance()) {
+ StackFrame::Type type = it.frame()->type();
+ if (type == StackFrame::OPTIMIZED) {
+ Code* code = it.frame()->LookupCode();
+ if (FLAG_trace_deopt) {
+ JSFunction* function =
+ static_cast<OptimizedFrame*>(it.frame())->function();
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(), "[deoptimizer patches for lazy deopt: ");
+ function->PrintName(scope.file());
+ PrintF(scope.file(),
+ " / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
+ }
+ SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc());
+ int deopt_index = safepoint.deoptimization_index();
+ CHECK(deopt_index != Safepoint::kNoDeoptimizationIndex);
+ }
+ }
+#endif
+
// TODO(titzer): we need a handle scope only because of the macro assembler,
// which is only used in EnsureCodeForDeoptimizationEntry.
HandleScope scope(isolate);
+
// Now patch all the codes for deoptimization.
for (int i = 0; i < codes.length(); i++) {
// It is finally time to die, code object.
if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif
if (FLAG_deopt_every_n_garbage_collections > 0) {
+ // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
+ // the topmost optimized frame can be deoptimized safely, because it
+ // might not have a lazy bailout point right after its current PC.
if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
Deoptimizer::DeoptimizeAll(isolate());
gcs_since_last_deopt_ = 0;
bool HasSingleCheck() { return lower_check_ == upper_check_; }
+ void UpdateUpperOffsets(HBoundsCheck* check, int32_t offset) {
+ BoundsCheckBbData* data = FatherInDominatorTree();
+ while (data != NULL && data->UpperCheck() == check) {
+ ASSERT(data->upper_offset_ <= offset);
+ data->upper_offset_ = offset;
+ data = data->FatherInDominatorTree();
+ }
+ }
+
+ void UpdateLowerOffsets(HBoundsCheck* check, int32_t offset) {
+ BoundsCheckBbData* data = FatherInDominatorTree();
+ while (data != NULL && data->LowerCheck() == check) {
+ ASSERT(data->lower_offset_ > offset);
+ data->lower_offset_ = offset;
+ data = data->FatherInDominatorTree();
+ }
+ }
+
// The goal of this method is to modify either upper_offset_ or
// lower_offset_ so that new_offset is also covered (the covered
// range grows).
upper_check_ = new_check;
} else {
TightenCheck(upper_check_, new_check);
+ UpdateUpperOffsets(upper_check_, upper_offset_);
}
} else if (new_offset < lower_offset_) {
lower_offset_ = new_offset;
lower_check_ = new_check;
} else {
TightenCheck(lower_check_, new_check);
+ UpdateLowerOffsets(lower_check_, lower_offset_);
}
} else {
// Should never have called CoverCheck() in this case.
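// A condensed sketch of the pattern the new offset propagation guards
// against (compare the regression tests added at the end of this patch),
// assuming d8 with --allow-natives-syntax: when a tightened check also
// serves dominating blocks, their recorded offsets must grow with it, or
// the far out-of-bounds store below is wrongly treated as covered.
var array = new Int32Array(100);
function f(base, cond) {
  array[base - 1] = 1;
  array[base - 2] = 2;
  if (cond) {
    array[base - 4] = 3;
  } else {
    array[base - 100] = 4;  // needs a check wide enough for base - 100
  }
}
f(5, true);
f(7, false);
%OptimizeFunctionOnNextCall(f);
f(7, false);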
void HSimulate::ReplayEnvironment(HEnvironment* env) {
+ if (done_with_replay_) return;
ASSERT(env != NULL);
env->set_ast_id(ast_id());
env->Drop(pop_count());
env->Push(value);
}
}
+ done_with_replay_ = true;
}
values_(2, zone),
assigned_indexes_(2, zone),
zone_(zone),
- removable_(removable) {}
+ removable_(removable),
+ done_with_replay_(false) {}
~HSimulate() {}
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
ZoneList<HValue*> values_;
ZoneList<int> assigned_indexes_;
Zone* zone_;
- RemovableSimulate removable_;
+ RemovableSimulate removable_ : 2;
+ bool done_with_replay_ : 1;
#ifdef DEBUG
Handle<JSFunction> closure_;
HInstruction* object = Add<HAllocate>(object_size_constant, type,
pretenure_flag, instance_type, site_context->current());
+ // If allocation folding reaches Page::kMaxRegularHeapObjectSize, the
+ // elements array may not get folded into the object. Hence, we set the
+ // elements pointer to the empty fixed array and let store elimination
+ // remove this store in the folding case.
+ HConstant* empty_fixed_array = Add<HConstant>(
+ isolate()->factory()->empty_fixed_array());
+ Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
+ empty_fixed_array, INITIALIZING_STORE);
+
BuildEmitObjectHeader(boilerplate_object, object);
Handle<FixedArrayBase> elements(boilerplate_object->elements());
void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (!instr->IsLazyBailout() && !instr->IsGap()) {
+ safepoints_.BumpLastLazySafepointIndex();
+ }
if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr);
}
BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- __ nop(); // Signals no inlined code.
}
__ mov(eax, object_reg);
}
__ mov(ebx, to_map);
- TransitionElementsKindStub stub(from_kind, to_kind);
+ bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+ TransitionElementsKindStub stub(from_kind, to_kind, is_js_array);
__ CallStub(&stub);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
instr->MarkAsCall();
instr = AssignPointerMap(instr);
- if (hinstr->HasObservableSideEffects()) {
- ASSERT(hinstr->next()->IsSimulate());
- HSimulate* sim = HSimulate::cast(hinstr->next());
- ASSERT(instruction_pending_deoptimization_environment_ == NULL);
- ASSERT(pending_deoptimization_ast_id_.IsNone());
- instruction_pending_deoptimization_environment_ = instr;
- pending_deoptimization_ast_id_ = sim->ast_id();
- }
-
// If the instruction does not have side effects, lazy deoptimization
// after the call will try to deoptimize to the point before the call.
// Thus we still need to attach an environment to this call even if
// it has no observable side effects.
chunk_->AddInstruction(clobber, current_block_);
}
chunk_->AddInstruction(instr, current_block_);
+
+ if (instr->IsCall()) {
+ HValue* hydrogen_value_for_lazy_bailout = current;
+ LInstruction* instruction_needing_environment = NULL;
+ if (current->HasObservableSideEffects()) {
+ HSimulate* sim = HSimulate::cast(current->next());
+ instruction_needing_environment = instr;
+ sim->ReplayEnvironment(current_block_->last_environment());
+ hydrogen_value_for_lazy_bailout = sim;
+ }
+ LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
+ bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+ chunk_->AddInstruction(bailout, current_block_);
+ if (instruction_needing_environment != NULL) {
+ // Store the lazy deopt environment with the instruction if needed.
+ // Right now it is only used for LInstanceOfKnownGlobal.
+ instruction_needing_environment->
+ SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
+ }
+ }
}
current_instruction_ = old_current;
}
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
instr->ReplayEnvironment(current_block_->last_environment());
-
- // If there is an instruction pending deoptimization environment create a
- // lazy bailout instruction to capture the environment.
- if (!pending_deoptimization_ast_id_.IsNone()) {
- ASSERT(pending_deoptimization_ast_id_ == instr->ast_id());
- LLazyBailout* lazy_bailout = new(zone()) LLazyBailout;
- LInstruction* result = AssignEnvironment(lazy_bailout);
- // Store the lazy deopt environment with the instruction if needed. Right
- // now it is only used for LInstanceOfKnownGlobal.
- instruction_pending_deoptimization_environment_->
- SetDeferredLazyDeoptimizationEnvironment(result->environment());
- instruction_pending_deoptimization_environment_ = NULL;
- pending_deoptimization_ast_id_ = BailoutId::None();
- return result;
- }
-
return NULL;
}
current_instruction_(NULL),
current_block_(NULL),
next_block_(NULL),
- allocator_(allocator),
- instruction_pending_deoptimization_environment_(NULL),
- pending_deoptimization_ast_id_(BailoutId::None()) { }
+ allocator_(allocator) { }
// Build the sequence for the graph.
LPlatformChunk* Build();
HBasicBlock* current_block_;
HBasicBlock* next_block_;
LAllocator* allocator_;
- LInstruction* instruction_pending_deoptimization_environment_;
- BailoutId pending_deoptimization_ast_id_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
};
}
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (!instr->IsLazyBailout() && !instr->IsGap()) {
+ safepoints_.BumpLastLazySafepointIndex();
+ }
+}
+
+
bool LCodeGen::GenerateDeferredCode() {
ASSERT(is_generating());
if (deferred_.length() > 0) {
this, Safepoint::kWithRegistersAndDoubles);
__ mov(a0, object_reg);
__ li(a1, Operand(to_map));
- TransitionElementsKindStub stub(from_kind, to_kind);
+ bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+ TransitionElementsKindStub stub(from_kind, to_kind, is_js_array);
__ CallStub(&stub);
RecordSafepointWithRegistersAndDoubles(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
// Code generation passes. Returns true if code generation should
// continue.
+ void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
bool GeneratePrologue();
bool GenerateDeferredCode();
bool GenerateDeoptJumpTable();
instr->MarkAsCall();
instr = AssignPointerMap(instr);
- if (hinstr->HasObservableSideEffects()) {
- ASSERT(hinstr->next()->IsSimulate());
- HSimulate* sim = HSimulate::cast(hinstr->next());
- ASSERT(instruction_pending_deoptimization_environment_ == NULL);
- ASSERT(pending_deoptimization_ast_id_.IsNone());
- instruction_pending_deoptimization_environment_ = instr;
- pending_deoptimization_ast_id_ = sim->ast_id();
- }
-
// If the instruction does not have side effects, lazy deoptimization
// after the call will try to deoptimize to the point before the call.
// Thus we still need to attach an environment to this call even if
// it has no observable side effects.
instr = AssignEnvironment(instr);
}
chunk_->AddInstruction(instr, current_block_);
+
+ if (instr->IsCall()) {
+ HValue* hydrogen_value_for_lazy_bailout = current;
+ LInstruction* instruction_needing_environment = NULL;
+ if (current->HasObservableSideEffects()) {
+ HSimulate* sim = HSimulate::cast(current->next());
+ instruction_needing_environment = instr;
+ sim->ReplayEnvironment(current_block_->last_environment());
+ hydrogen_value_for_lazy_bailout = sim;
+ }
+ LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
+ bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+ chunk_->AddInstruction(bailout, current_block_);
+ if (instruction_needing_environment != NULL) {
+ // Store the lazy deopt environment with the instruction if needed.
+ // Right now it is only used for LInstanceOfKnownGlobal.
+ instruction_needing_environment->
+ SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
+ }
+ }
}
current_instruction_ = old_current;
}
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
instr->ReplayEnvironment(current_block_->last_environment());
-
- // If there is an instruction pending deoptimization environment create a
- // lazy bailout instruction to capture the environment.
- if (pending_deoptimization_ast_id_ == instr->ast_id()) {
- LInstruction* result = new(zone()) LLazyBailout;
- result = AssignEnvironment(result);
- // Store the lazy deopt environment with the instruction if needed. Right
- // now it is only used for LInstanceOfKnownGlobal.
- instruction_pending_deoptimization_environment_->
- SetDeferredLazyDeoptimizationEnvironment(result->environment());
- instruction_pending_deoptimization_environment_ = NULL;
- pending_deoptimization_ast_id_ = BailoutId::None();
- return result;
- }
-
return NULL;
}
current_block_(NULL),
next_block_(NULL),
allocator_(allocator),
- position_(RelocInfo::kNoPosition),
- instruction_pending_deoptimization_environment_(NULL),
- pending_deoptimization_ast_id_(BailoutId::None()) { }
+ position_(RelocInfo::kNoPosition) { }
// Build the sequence for the graph.
LPlatformChunk* Build();
HBasicBlock* next_block_;
LAllocator* allocator_;
int position_;
- LInstruction* instruction_pending_deoptimization_environment_;
- BailoutId pending_deoptimization_ast_id_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
};
}
+void Runtime::NeuterArrayBuffer(Handle<JSArrayBuffer> array_buffer) {
+ Isolate* isolate = array_buffer->GetIsolate();
+ for (Handle<Object> view_obj(array_buffer->weak_first_view(), isolate);
+ !view_obj->IsUndefined();) {
+ Handle<JSArrayBufferView> view(JSArrayBufferView::cast(*view_obj));
+ if (view->IsJSTypedArray()) {
+ JSTypedArray::cast(*view)->Neuter();
+ } else if (view->IsJSDataView()) {
+ JSDataView::cast(*view)->Neuter();
+ } else {
+ UNREACHABLE();
+ }
+ view_obj = handle(view->weak_next(), isolate);
+ }
+ array_buffer->Neuter();
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferInitialize) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
if (target_length == 0) return isolate->heap()->undefined_value();
- ASSERT(NumberToSize(isolate, source->byte_length()) - target_length >= start);
+ size_t source_byte_length = NumberToSize(isolate, source->byte_length());
+ CHECK(start <= source_byte_length);
+ CHECK(source_byte_length - start >= target_length);
uint8_t* source_data = reinterpret_cast<uint8_t*>(source->backing_store());
uint8_t* target_data = reinterpret_cast<uint8_t*>(target->backing_store());
CopyBytes(target_data, source_data + start, target_length);
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferNeuter) {
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, array_buffer, 0);
+ ASSERT(!array_buffer->is_external());
+ void* backing_store = array_buffer->backing_store();
+ size_t byte_length = NumberToSize(isolate, array_buffer->byte_length());
+ array_buffer->set_is_external(true);
+ Runtime::NeuterArrayBuffer(array_buffer);
+ V8::ArrayBufferAllocator()->Free(backing_store, byte_length);
+ return isolate->heap()->undefined_value();
+}
+
+
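// A minimal usage sketch for the new runtime entry, assuming d8 with
// --allow-natives-syntax and the mjsunit assertEquals helper: neutering
// marks the buffer as external, detaches every view, and frees the backing
// store.
var buf = new ArrayBuffer(16);
var view = new Uint8Array(buf);
%ArrayBufferNeuter(buf);
assertEquals(0, buf.byteLength);
assertEquals(0, view.length);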
void Runtime::ArrayIdToTypeAndSize(
int arrayId, ExternalArrayType* array_type, size_t* element_size) {
switch (arrayId) {
size_t byte_offset = NumberToSize(isolate, *byte_offset_object);
size_t byte_length = NumberToSize(isolate, *byte_length_object);
- ASSERT(byte_length % element_size == 0);
+ size_t array_buffer_byte_length =
+ NumberToSize(isolate, buffer->byte_length());
+ CHECK(byte_offset <= array_buffer_byte_length);
+ CHECK(array_buffer_byte_length - byte_offset >= byte_length);
+
+ CHECK_EQ(0, static_cast<int>(byte_length % element_size));
size_t length = byte_length / element_size;
if (length > static_cast<unsigned>(Smi::kMaxValue)) {
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
+ if (obj->IsAccessCheckNeeded() &&
+ !isolate->MayNamedAccessWrapper(obj,
+ isolate->factory()->proto_string(),
+ v8::ACCESS_SET)) {
+ isolate->ReportFailedAccessCheck(*obj, v8::ACCESS_SET);
+ RETURN_IF_SCHEDULED_EXCEPTION(isolate);
+ return isolate->heap()->undefined_value();
+ }
if (FLAG_harmony_observation && obj->map()->is_observed()) {
Handle<Object> old_value(
GetPrototypeSkipHiddenPrototypes(isolate, *obj), isolate);
F(ArrayBufferGetByteLength, 1, 1)\
F(ArrayBufferSliceImpl, 3, 1) \
F(ArrayBufferIsView, 1, 1) \
+ F(ArrayBufferNeuter, 1, 1) \
\
F(TypedArrayInitialize, 5, 1) \
F(TypedArrayInitializeFromArrayLike, 4, 1) \
size_t allocated_length,
bool initialize = true);
+ static void NeuterArrayBuffer(Handle<JSArrayBuffer> array_buffer);
+
static void FreeArrayBuffer(
Isolate* isolate,
JSArrayBuffer* phantom_array_buffer);
// Record deoptimization index for lazy deoptimization for the last
// outstanding safepoints.
void RecordLazyDeoptimizationIndex(int index);
+ void BumpLastLazySafepointIndex() {
+ last_lazy_safepoint_ = deopt_index_list_.length();
+ }
// Emit the safepoint table after the body. The number of bits per
// entry must be enough to hold all the pointer indexes.
const int kDebugRegisterBits = 4;
const int kDebugIdShift = kDebugRegisterBits;
-const int kDeoptTableSerializeEntryCount = 8;
+const int kDeoptTableSerializeEntryCount = 12;
// ExternalReferenceTable is a helper class that defines the relationship
// between external references and their encodings. It is used to build
macro TYPED_ARRAY_CONSTRUCTOR(ARRAY_ID, NAME, ELEMENT_SIZE)
function NAMEConstructByArrayBuffer(obj, buffer, byteOffset, length) {
+ if (!IS_UNDEFINED(byteOffset)) {
+ byteOffset =
+ ToPositiveInteger(byteOffset, "invalid_typed_array_length");
+ }
+ if (!IS_UNDEFINED(length)) {
+ length = ToPositiveInteger(length, "invalid_typed_array_length");
+ }
+
var bufferByteLength = %ArrayBufferGetByteLength(buffer);
var offset;
if (IS_UNDEFINED(byteOffset)) {
offset = 0;
} else {
- offset = ToPositiveInteger(byteOffset, "invalid_typed_array_length");
+ offset = byteOffset;
if (offset % ELEMENT_SIZE !== 0) {
throw MakeRangeError("invalid_typed_array_alignment",
newByteLength = bufferByteLength - offset;
newLength = newByteLength / ELEMENT_SIZE;
} else {
- var newLength = ToPositiveInteger(length, "invalid_typed_array_length");
+ var newLength = length;
newByteLength = newLength * ELEMENT_SIZE;
}
if ((offset + newByteLength > bufferByteLength)
function NAMEConstructByArrayLike(obj, arrayLike) {
var length = arrayLike.length;
var l = ToPositiveInteger(length, "invalid_typed_array_length");
+
if (l > %MaxSmi()) {
throw MakeRangeError("invalid_typed_array_length");
}
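// A short sketch of why NAMEConstructByArrayBuffer above hoists its
// conversions, assuming a JS shell: byteOffset.valueOf can run arbitrary
// code (the regression test below neuters the buffer at exactly that
// point), so both arguments are converted before bufferByteLength is read
// and the range checks run.
var log = [];
var buf = new ArrayBuffer(16);
new Uint8Array(buf,
               {valueOf: function() { log.push("offset"); return 0; }},
               {valueOf: function() { log.push("length"); return 4; }});
// log is ["offset", "length"]: conversions precede the range checks.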
function CreateSubArray(elementSize, constructor) {
return function(begin, end) {
- var srcLength = %TypedArrayGetLength(this);
var beginInt = TO_INTEGER(begin);
+ if (!IS_UNDEFINED(end)) {
+ end = TO_INTEGER(end);
+ }
+
+ var srcLength = %TypedArrayGetLength(this);
if (beginInt < 0) {
beginInt = MathMax(0, srcLength + beginInt);
} else {
beginInt = MathMin(srcLength, beginInt);
}
- var endInt = IS_UNDEFINED(end) ? srcLength : TO_INTEGER(end);
+ var endInt = IS_UNDEFINED(end) ? srcLength : end;
if (endInt < 0) {
endInt = MathMax(0, srcLength + endInt);
} else {
if (!IS_ARRAYBUFFER(buffer)) {
throw MakeTypeError('data_view_not_array_buffer', []);
}
+ if (!IS_UNDEFINED(byteOffset)) {
+ byteOffset = ToPositiveInteger(byteOffset, 'invalid_data_view_offset');
+ }
+ if (!IS_UNDEFINED(byteLength)) {
+ byteLength = TO_INTEGER(byteLength);
+ }
+
var bufferByteLength = %ArrayBufferGetByteLength(buffer);
- var offset = IS_UNDEFINED(byteOffset) ?
- 0 : ToPositiveInteger(byteOffset, 'invalid_data_view_offset');
+
+ var offset = IS_UNDEFINED(byteOffset) ? 0 : byteOffset;
if (offset > bufferByteLength) {
throw MakeRangeError('invalid_data_view_offset');
}
- var length = IS_UNDEFINED(byteLength) ?
- bufferByteLength - offset : TO_INTEGER(byteLength);
+
+ var length = IS_UNDEFINED(byteLength)
+ ? bufferByteLength - offset
+ : byteLength;
if (length < 0 || offset + length > bufferByteLength) {
throw MakeRangeError('invalid_data_view_length');
}
#define MAJOR_VERSION 3
#define MINOR_VERSION 24
#define BUILD_NUMBER 35
-#define PATCH_LEVEL 17
+#define PATCH_LEVEL 22
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
}
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (!instr->IsLazyBailout() && !instr->IsGap()) {
+ safepoints_.BumpLastLazySafepointIndex();
+ }
+}
+
+
bool LCodeGen::GenerateJumpTable() {
Label needs_frame;
if (jump_table_.length() > 0) {
BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- __ nop(); // Signals no inlined code.
}
__ movp(rax, object_reg);
}
__ Move(rbx, to_map);
- TransitionElementsKindStub stub(from_kind, to_kind);
+ bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+ TransitionElementsKindStub stub(from_kind, to_kind, is_js_array);
__ CallStub(&stub);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
// Code generation passes. Returns true if code generation should
// continue.
+ void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
bool GeneratePrologue();
bool GenerateDeferredCode();
bool GenerateJumpTable();
instr->MarkAsCall();
instr = AssignPointerMap(instr);
- if (hinstr->HasObservableSideEffects()) {
- ASSERT(hinstr->next()->IsSimulate());
- HSimulate* sim = HSimulate::cast(hinstr->next());
- ASSERT(instruction_pending_deoptimization_environment_ == NULL);
- ASSERT(pending_deoptimization_ast_id_.IsNone());
- instruction_pending_deoptimization_environment_ = instr;
- pending_deoptimization_ast_id_ = sim->ast_id();
- }
-
// If the instruction does not have side effects, lazy deoptimization
// after the call will try to deoptimize to the point before the call.
// Thus we still need to attach an environment to this call even if
// it has no observable side effects.
instr = AssignEnvironment(instr);
}
chunk_->AddInstruction(instr, current_block_);
+
+ if (instr->IsCall()) {
+ HValue* hydrogen_value_for_lazy_bailout = current;
+ LInstruction* instruction_needing_environment = NULL;
+ if (current->HasObservableSideEffects()) {
+ HSimulate* sim = HSimulate::cast(current->next());
+ instruction_needing_environment = instr;
+ sim->ReplayEnvironment(current_block_->last_environment());
+ hydrogen_value_for_lazy_bailout = sim;
+ }
+ LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
+ bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+ chunk_->AddInstruction(bailout, current_block_);
+ if (instruction_needing_environment != NULL) {
+ // Store the lazy deopt environment with the instruction if needed.
+ // Right now it is only used for LInstanceOfKnownGlobal.
+ instruction_needing_environment->
+ SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
+ }
+ }
}
current_instruction_ = old_current;
}
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
instr->ReplayEnvironment(current_block_->last_environment());
-
- // If there is an instruction pending deoptimization environment create a
- // lazy bailout instruction to capture the environment.
- if (pending_deoptimization_ast_id_ == instr->ast_id()) {
- LLazyBailout* lazy_bailout = new(zone()) LLazyBailout;
- LInstruction* result = AssignEnvironment(lazy_bailout);
- // Store the lazy deopt environment with the instruction if needed. Right
- // now it is only used for LInstanceOfKnownGlobal.
- instruction_pending_deoptimization_environment_->
- SetDeferredLazyDeoptimizationEnvironment(result->environment());
- instruction_pending_deoptimization_environment_ = NULL;
- pending_deoptimization_ast_id_ = BailoutId::None();
- return result;
- }
-
return NULL;
}
current_instruction_(NULL),
current_block_(NULL),
next_block_(NULL),
- allocator_(allocator),
- instruction_pending_deoptimization_environment_(NULL),
- pending_deoptimization_ast_id_(BailoutId::None()) { }
+ allocator_(allocator) { }
// Build the sequence for the graph.
LPlatformChunk* Build();
HBasicBlock* current_block_;
HBasicBlock* next_block_;
LAllocator* allocator_;
- LInstruction* instruction_pending_deoptimization_environment_;
- BailoutId pending_deoptimization_ast_id_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
};
checker.Run(true, false);
checker.Run(false, false);
}
+
+
+TEST(Regress354123) {
+ LocalContext current;
+ v8::Isolate* isolate = current->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
+ templ->SetAccessCheckCallbacks(NamedAccessCounter, IndexedAccessCounter);
+ current->Global()->Set(v8_str("friend"), templ->NewInstance());
+
+ // Test access using __proto__ from the prototype chain.
+ named_access_count = 0;
+ CompileRun("friend.__proto__ = {};");
+ CHECK_EQ(2, named_access_count);
+ CompileRun("friend.__proto__;");
+ CHECK_EQ(4, named_access_count);
+
+ // Test access using __proto__ as a hijacked function (A).
+ named_access_count = 0;
+ CompileRun("var p = Object.prototype;"
+ "var f = Object.getOwnPropertyDescriptor(p, '__proto__').set;"
+ "f.call(friend, {});");
+ CHECK_EQ(1, named_access_count);
+ CompileRun("var p = Object.prototype;"
+ "var f = Object.getOwnPropertyDescriptor(p, '__proto__').get;"
+ "f.call(friend);");
+ CHECK_EQ(2, named_access_count);
+
+ // Test access using __proto__ as a hijacked function (B).
+ named_access_count = 0;
+ CompileRun("var f = Object.prototype.__lookupSetter__('__proto__');"
+ "f.call(friend, {});");
+ CHECK_EQ(1, named_access_count);
+ CompileRun("var f = Object.prototype.__lookupGetter__('__proto__');"
+ "f.call(friend);");
+ CHECK_EQ(2, named_access_count);
+
+ // Test access using Object.setPrototypeOf reflective method.
+ named_access_count = 0;
+ CompileRun("Object.setPrototypeOf(friend, {});");
+ CHECK_EQ(1, named_access_count);
+ CompileRun("Object.getPrototypeOf(friend);");
+ CHECK_EQ(2, named_access_count);
+}
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Flags: --allow-natives-syntax
+
+var __v_7 = { };
+function __f_8(base, condition) {
+ __v_7[base + 3] = 0;
+ __v_7[base + 4] = 0;
+ if (condition) {
+ __v_7[base + 0] = 0;
+ __v_7[base + 5] = 0;
+ } else {
+ __v_7[base + 0] = 0;
+ __v_7[base + 18] = 0;
+ }
+}
+__f_8(1, true);
+__f_8(1, false);
+%OptimizeFunctionOnNextCall(__f_8);
+__f_8(5, false);
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --expose-gc
+
+function __f_4(i1) {
+ return __v_3[i1] * __v_3[0];
+}
+function __f_3(i1) {
+ __f_4(i1);
+ __f_4(i1 + 16);
+ __f_4(i1 + 32);
+ %OptimizeFunctionOnNextCall(__f_4);
+ var x = __f_4(i1 + 993);
+ return x;
+}
+function __f_5() {
+ __v_3[0] = +__v_3[0];
+ gc();
+ __f_3(0) | 0;
+ __v_3 = /\u23a1|x/;
+ return 0;
+}
+var __v_3 = new Float32Array(1000);
+__f_5();
+__f_5();
+__f_5();
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var buffer1 = new ArrayBuffer(100 * 1024);
+
+var array1 = new Uint8Array(buffer1, {valueOf : function() {
+ %ArrayBufferNeuter(buffer1);
+ return 0;
+}});
+
+assertEquals(0, array1.length);
+
+var buffer2 = new ArrayBuffer(100 * 1024);
+
+assertThrows(function() {
+ var array2 = new Uint8Array(buffer2, 0, {valueOf : function() {
+ %ArrayBufferNeuter(buffer2);
+ return 100 * 1024;
+ }});
+}, RangeError);
+
+
+var buffer3 = new ArrayBuffer(100 * 1024 * 1024);
+var dataView1 = new DataView(buffer3, {valueOf : function() {
+ %ArrayBufferNeuter(buffer3);
+ return 0;
+}});
+
+assertEquals(0, dataView1.byteLength);
+
+var buffer4 = new ArrayBuffer(100 * 1024);
+assertThrows(function() {
+ var dataView2 = new DataView(buffer4, 0, {valueOf : function() {
+ %ArrayBufferNeuter(buffer4);
+ return 100 * 1024 * 1024;
+ }});
+}, RangeError);
+
+
+var buffer5 = new ArrayBuffer(100 * 1024);
+var buffer6 = buffer5.slice({valueOf : function() {
+ %ArrayBufferNeuter(buffer5);
+ return 0;
+}}, 100 * 1024 * 1024);
+assertEquals(0, buffer6.byteLength);
+
+
+var buffer7 = new ArrayBuffer(100 * 1024 * 1024);
+var buffer8 = buffer7.slice(0, {valueOf : function() {
+ %ArrayBufferNeuter(buffer7);
+ return 100 * 1024 * 1024;
+}});
+assertEquals(0, buffer8.byteLength);
+
+var buffer9 = new ArrayBuffer(1024);
+var array9 = new Uint8Array(buffer9);
+var array10 = array9.subarray({valueOf : function() {
+ %ArrayBufferNeuter(buffer9);
+ return 0;
+ }}, 1024);
+assertEquals(0, array9.length);
+assertEquals(0, array10.length);
+
+var buffer11 = new ArrayBuffer(1024);
+var array11 = new Uint8Array(buffer11);
+var array12 = array11.subarray(0, {valueOf : function() {
+ %ArrayBufferNeuter(buffer11);
+ return 1024;
+ }});
+assertEquals(0, array11.length);
+assertEquals(0, array12.length);
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var dummy = new Int32Array(100);
+array = new Int32Array(100);
+var dummy2 = new Int32Array(100);
+
+array[-17] = 0;
+function fun(base,cond) {
+ array[base - 1] = 1;
+ array[base - 2] = 2;
+ if (cond) {
+ array[base - 4] = 3;
+ array[base - 5] = 4;
+ } else {
+ array[base - 6] = 5;
+ array[base - 100] = 777;
+ }
+}
+fun(5,true);
+fun(7,false);
+%OptimizeFunctionOnNextCall(fun);
+fun(7,false);
+
+for (var i = 0; i < dummy.length; i++) {
+ assertEquals(0, dummy[i]);
+}
+for (var i = 0; i < dummy2.length; i++) {
+ assertEquals(0, dummy2[i]);
+}