LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
LOperand* object = UseRegister(instr->object());
LOperand* index = UseRegister(instr->index());
- return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index));
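+ // The load may now go through the runtime and allocate, so the
+ // instruction needs a pointer map for its safepoint.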
+ LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+ LInstruction* result = DefineSameAsFirst(load);
+ return AssignPointerMap(result);
}
} } // namespace v8::internal
}
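+// Deferred path for double (mutable heap number) fields: |index| still
+// carries the flag bit here and the runtime strips it. The boxed result
+// arrives in r0 and is written into |result|'s safepoint slot.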
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register result,
+ Register object,
+ Register index) {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ Push(object);
+ __ Push(index);
+ __ mov(cp, Operand::Zero());
+ __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(r0, result);
+}
+
+
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+ class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+ public:
+ DeferredLoadMutableDouble(LCodeGen* codegen,
+ LLoadFieldByIndex* instr,
+ Register result,
+ Register object,
+ Register index)
+ : LDeferredCode(codegen),
+ instr_(instr),
+ result_(result),
+ object_(object),
+ index_(index) {
+ }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LLoadFieldByIndex* instr_;
+ Register result_;
+ Register object_;
+ Register index_;
+ };
+
Register object = ToRegister(instr->object());
Register index = ToRegister(instr->index());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
+ DeferredLoadMutableDouble* deferred;
+ deferred = new(zone()) DeferredLoadMutableDouble(
+ this, instr, result, object, index);
+
Label out_of_object, done;
+
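+ // The lowest untagged bit of the encoded field index is set for fields
+ // holding a mutable double: defer those to the runtime, otherwise shift
+ // the flag bit out of the index.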
+ __ tst(index, Operand(Smi::FromInt(1)));
+ __ b(ne, deferred->entry());
+ __ mov(index, Operand(index, ASR, 1));
+
__ cmp(index, Operand::Zero());
__ b(lt, &out_of_object);
__ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
__ ldr(result, FieldMemOperand(scratch,
FixedArray::kHeaderSize - kPointerSize));
+ __ bind(deferred->exit());
__ bind(&done);
}
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+ void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register result,
+ Register object,
+ Register index);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
LOperand* object = UseRegisterAtStart(instr->object());
LOperand* index = UseRegister(instr->index());
- return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index));
+ LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+ LInstruction* result = DefineSameAsFirst(load);
+ return AssignPointerMap(result);
}
}
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register result,
+ Register object,
+ Register index) {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ Push(object);
+ __ Push(index);
+ __ Mov(cp, 0);
+ __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(x0, result);
+}
+
+
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+ class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+ public:
+ DeferredLoadMutableDouble(LCodeGen* codegen,
+ LLoadFieldByIndex* instr,
+ Register result,
+ Register object,
+ Register index)
+ : LDeferredCode(codegen),
+ instr_(instr),
+ result_(result),
+ object_(object),
+ index_(index) {
+ }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LLoadFieldByIndex* instr_;
+ Register result_;
+ Register object_;
+ Register index_;
+ };
Register object = ToRegister(instr->object());
Register index = ToRegister(instr->index());
Register result = ToRegister(instr->result());
__ AssertSmi(index);
+ DeferredLoadMutableDouble* deferred;
+ deferred = new(zone()) DeferredLoadMutableDouble(
+ this, instr, result, object, index);
+
Label out_of_object, done;
+
+ __ TestAndBranchIfAnySet(
+ index, reinterpret_cast<uint64_t>(Smi::FromInt(1)), deferred->entry());
+ __ Mov(index, Operand(index, ASR, 1));
+
__ Cmp(index, Smi::FromInt(0));
__ B(lt, &out_of_object);
__ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ Ldr(result, FieldMemOperand(result,
FixedArray::kHeaderSize - kPointerSize));
+ __ Bind(deferred->exit());
__ Bind(&done);
}
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+ void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register result,
+ Register object,
+ Register index);
Operand ToOperand32(LOperand* op, IntegerSignedness signedness);
if (field_index >= map->inobject_properties()) {
field_index = -(field_index - map->inobject_properties() + 1);
}
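+ // Tag the low bit of the encoded index when the field holds a double
+ // (mutable heap number); negative values, set above, mark out-of-object
+ // fields.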
+ field_index = (field_index << 1) | details.representation().IsDouble();
indices->set(index, Smi::FromInt(field_index));
}
}
HValue* index) {
SetOperandAt(0, object);
SetOperandAt(1, index);
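+ // The double case allocates a fresh HeapNumber through the runtime, so
+ // this instruction can now trigger a GC.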
+ SetChangesFlag(kNewSpacePromotion);
set_representation(Representation::Tagged());
}
}
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register object,
+ Register index) {
+ PushSafepointRegistersScope scope(this);
+ __ push(object);
+ __ push(index);
+ __ xor_(esi, esi);
+ __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(object, eax);
+}
+
+
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+ class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+ public:
+ DeferredLoadMutableDouble(LCodeGen* codegen,
+ LLoadFieldByIndex* instr,
+ Register object,
+ Register index,
+ const X87Stack& x87_stack)
+ : LDeferredCode(codegen, x87_stack),
+ instr_(instr),
+ object_(object),
+ index_(index) {
+ }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LLoadFieldByIndex* instr_;
+ Register object_;
+ Register index_;
+ };
+
Register object = ToRegister(instr->object());
Register index = ToRegister(instr->index());
+ DeferredLoadMutableDouble* deferred;
+ deferred = new(zone()) DeferredLoadMutableDouble(
+ this, instr, object, index, x87_stack_);
+
Label out_of_object, done;
+ __ test(index, Immediate(Smi::FromInt(1)));
+ __ j(not_zero, deferred->entry());
+
+ __ sar(index, 1);
+
__ cmp(index, Immediate(0));
__ j(less, &out_of_object, Label::kNear);
__ mov(object, FieldOperand(object,
index,
times_half_pointer_size,
FixedArray::kHeaderSize - kPointerSize));
+ __ bind(deferred->exit());
__ bind(&done);
}
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+ void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register object,
+ Register index);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
LOperand* object = UseRegister(instr->object());
LOperand* index = UseTempRegister(instr->index());
- return DefineSameAsFirst(new(zone()) LLoadFieldByIndex(object, index));
+ LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+ LInstruction* result = DefineSameAsFirst(load);
+ return AssignPointerMap(result);
}
}
+Handle<Object> JSObject::FastPropertyAt(Handle<JSObject> object,
+ Representation representation,
+ int index) {
+ Isolate* isolate = object->GetIsolate();
+ CALL_HEAP_FUNCTION(isolate,
+ object->FastPropertyAt(representation, index), Object);
+}
+
+
template<class ContextObject>
class JSObjectWalkVisitor {
public:
MUST_USE_RESULT inline MaybeObject* FastPropertyAt(
Representation representation,
int index);
+ static Handle<Object> FastPropertyAt(Handle<JSObject> object,
+ Representation representation,
+ int index);
inline Object* RawFastPropertyAt(int index);
inline void FastPropertyAtPut(int index, Object* value);
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LoadMutableDouble) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Smi, index, 1);
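+ // The index smi encodes (field_index << 1) | is_double; negative values
+ // denote out-of-object fields and are mapped back to an index past the
+ // in-object properties.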
+ int idx = index->value() >> 1;
+ if (idx < 0) {
+ idx = -idx + object->map()->inobject_properties() - 1;
+ }
+ return *JSObject::FastPropertyAt(object, Representation::Double(), idx);
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_TryMigrateInstance) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
F(DebugCallbackSupportsStepping, 1, 1) \
F(DebugPrepareStepInIfStepping, 1, 1) \
F(FlattenString, 1, 1) \
+ F(LoadMutableDouble, 2, 1) \
F(TryMigrateInstance, 1, 1) \
F(NotifyContextDisposed, 0, 1) \
\
}
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register object,
+ Register index) {
+ PushSafepointRegistersScope scope(this);
+ __ Push(object);
+ __ Push(index);
+ __ xorp(rsi, rsi);
+ __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(object, rax);
+}
+
+
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+ class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+ public:
+ DeferredLoadMutableDouble(LCodeGen* codegen,
+ LLoadFieldByIndex* instr,
+ Register object,
+ Register index)
+ : LDeferredCode(codegen),
+ instr_(instr),
+ object_(object),
+ index_(index) {
+ }
+ virtual void Generate() V8_OVERRIDE {
+ codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
+ }
+ virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+ private:
+ LLoadFieldByIndex* instr_;
+ Register object_;
+ Register index_;
+ };
+
Register object = ToRegister(instr->object());
Register index = ToRegister(instr->index());
+ DeferredLoadMutableDouble* deferred;
+ deferred = new(zone()) DeferredLoadMutableDouble(this, instr, object, index);
+
Label out_of_object, done;
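+ // On x64 the double-flag bit sits in the upper half of the smi-tagged
+ // index, so it cannot be a testq immediate; load it into the scratch
+ // register first, and defer double fields to the runtime.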
+ __ Move(kScratchRegister, Smi::FromInt(1));
+ __ testq(index, kScratchRegister);
+ __ j(not_zero, deferred->entry());
+
+ __ sar(index, Immediate(1));
+
__ SmiToInteger32(index, index);
__ cmpl(index, Immediate(0));
__ j(less, &out_of_object, Label::kNear);
index,
times_pointer_size,
FixedArray::kHeaderSize - kPointerSize));
+ __ bind(deferred->exit());
__ bind(&done);
}
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+ void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+ Register object,
+ Register index);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
LOperand* object = UseRegister(instr->object());
LOperand* index = UseTempRegister(instr->index());
- return DefineSameAsFirst(new(zone()) LLoadFieldByIndex(object, index));
+ LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+ LInstruction* result = DefineSameAsFirst(load);
+ return AssignPointerMap(result);
}
// Only applicable to TypedArrays.
"_TypedArrayInitialize": true,
+ // Only applicable to mutable double fields.
+ "LoadMutableDouble": true,
+
// Only applicable to generators.
"_GeneratorNext": true,
"_GeneratorThrow": true,
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
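+// o.a is a double field backed by a mutable heap number. The value captured
+// while iterating must be a fresh copy, so the later write to o.a must not
+// change array[0].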
+var o = {a:1.5, b:{}};
+
+function f(o) {
+ var result = [];
+ for (var k in o) {
+ result[result.length] = o[k];
+ }
+ return result;
+}
+
+f(o);
+f(o);
+%OptimizeFunctionOnNextCall(f);
+var array = f(o);
+o.a = 1.7;
+assertEquals(1.5, array[0]);