// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_DEOPTIMIZER_H_
#define V8_DEOPTIMIZER_H_
-#include "v8.h"
+#include "src/v8.h"
-#include "allocation.h"
-#include "macro-assembler.h"
-#include "zone-inl.h"
+#include "src/allocation.h"
+#include "src/macro-assembler.h"
+#include "src/zone-inl.h"
namespace v8 {
static inline double read_double_value(Address p) {
-#ifdef V8_HOST_CAN_READ_UNALIGNED
- return Memory::double_at(p);
-#else // V8_HOST_CAN_READ_UNALIGNED
- // Prevent gcc from using load-double (mips ldc1) on (possibly)
- // non-64-bit aligned address.
- union conversion {
- double d;
- uint32_t u[2];
- } c;
- c.u[0] = *reinterpret_cast<uint32_t*>(p);
- c.u[1] = *reinterpret_cast<uint32_t*>(p + 4);
- return c.d;
-#endif // V8_HOST_CAN_READ_UNALIGNED
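+ // Reading through memcpy is safe for possibly unaligned addresses;
+ // compilers lower it to a single load on targets that support
+ // unaligned access.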
+ double d;
+ memcpy(&d, p, sizeof(d));
+ return d;
}
+static inline simd128_value_t read_simd128_value(Address p) {
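+ // Unlike read_double_value above, this is a direct 16-byte load and
+ // assumes p is suitably aligned for simd128_value_t.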
+ return *reinterpret_cast<simd128_value_t*>(p);
+}
class FrameDescription;
class TranslationIterator;
};
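+// Mirrors HeapNumberMaterializationDescriptor for simd128 values: pairs a
+// value with the destination (an int index or a frame slot Address) at
+// which it will be materialized.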
+template<typename T>
+class SIMD128MaterializationDescriptor BASE_EMBEDDED {
+ public:
+ SIMD128MaterializationDescriptor(T destination, simd128_value_t value)
+ : destination_(destination), value_(value) { }
+
+ T destination() const { return destination_; }
+ simd128_value_t value() const { return value_; }
+
+ private:
+ T destination_;
+ simd128_value_t value_;
+};
+
+
class ObjectMaterializationDescriptor BASE_EMBEDDED {
public:
ObjectMaterializationDescriptor(
static const int kBailoutTypesWithCodeEntry = SOFT + 1;
- struct JumpTableEntry {
- inline JumpTableEntry(Address entry,
- Deoptimizer::BailoutType type,
- bool frame)
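+ // Raw source position plus mnemonic/detail strings recording why the
+ // code deoptimized; only compared when --trace-deopt is on (see
+ // IsEquivalentTo below).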
+ struct Reason {
+ Reason(int r, const char* m, const char* d)
+ : raw_position(r), mnemonic(m), detail(d) {}
+
+ bool operator==(const Reason& other) const {
+ return raw_position == other.raw_position &&
+ CStringEquals(mnemonic, other.mnemonic) &&
+ CStringEquals(detail, other.detail);
+ }
+
+ bool operator!=(const Reason& other) const { return !(*this == other); }
+
+ int raw_position;
+ const char* mnemonic;
+ const char* detail;
+ };
+
+ struct JumpTableEntry : public ZoneObject {
+ inline JumpTableEntry(Address entry, const Reason& the_reason,
+ Deoptimizer::BailoutType type, bool frame)
: label(),
address(entry),
+ reason(the_reason),
bailout_type(type),
- needs_frame(frame) { }
+ needs_frame(frame) {}
+
+ bool IsEquivalentTo(const JumpTableEntry& other) const {
+ return address == other.address && bailout_type == other.bailout_type &&
+ needs_frame == other.needs_frame &&
+ (!FLAG_trace_deopt || reason == other.reason);
+ }
+
Label label;
Address address;
+ Reason reason;
Deoptimizer::BailoutType bailout_type;
bool needs_frame;
};
Isolate* isolate);
static Deoptimizer* Grab(Isolate* isolate);
-#ifdef ENABLE_DEBUGGER_SUPPORT
// The returned object with information on the optimized frame needs to be
// freed before another one can be generated.
static DeoptimizedFrameInfo* DebuggerInspectableFrame(JavaScriptFrame* frame,
Isolate* isolate);
static void DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info,
Isolate* isolate);
-#endif
// Makes sure that there is enough room in the relocation
// information of a code object to perform lazy deoptimization
~Deoptimizer();
void MaterializeHeapObjects(JavaScriptFrameIterator* it);
-#ifdef ENABLE_DEBUGGER_SUPPORT
+
void MaterializeHeapNumbersForDebuggerInspectableFrame(
Address parameters_top,
uint32_t parameters_size,
Address expressions_top,
uint32_t expressions_size,
DeoptimizedFrameInfo* info);
-#endif
static void ComputeOutputFrames(Deoptimizer* deoptimizer);
void DoComputeCompiledStubFrame(TranslationIterator* iterator,
int frame_index);
+ // Translate object, store the result into an auxiliary array
+ // (deferred_objects_tagged_values_).
void DoTranslateObject(TranslationIterator* iterator,
int object_index,
int field_index);
+ // Translate value, store the result into the given frame slot.
void DoTranslateCommand(TranslationIterator* iterator,
int frame_index,
unsigned output_offset);
+ // Translate object, do not store the result anywhere (but do update
+ // the deferred materialization array).
+ void DoTranslateObjectAndSkip(TranslationIterator* iterator);
+
unsigned ComputeInputFrameSize() const;
unsigned ComputeFixedSize(JSFunction* function) const;
void AddObjectDuplication(intptr_t slot, int object_index);
void AddObjectTaggedValue(intptr_t value);
void AddObjectDoubleValue(double value);
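+ // For the SIMD128 variants below, translation_opcode identifies the
+ // concrete 128-bit type (FLOAT32x4, FLOAT64x2 or INT32x4).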
+ void AddObjectSIMD128Value(simd128_value_t value, int translation_opcode);
void AddDoubleValue(intptr_t slot_address, double value);
+ void AddSIMD128Value(intptr_t slot_address, simd128_value_t value,
+ int translation_opcode);
bool ArgumentsObjectIsAdapted(int object_index) {
ObjectMaterializationDescriptor desc = deferred_objects_.at(object_index);
// Fill the given output frame's registers to contain the failure handler
// address and the number of parameters for a stub failure trampoline.
void SetPlatformCompiledStubRegisters(FrameDescription* output_frame,
- CodeStubInterfaceDescriptor* desc);
+ CodeStubDescriptor* desc);
- // Fill the given output frame's double registers with the original values
- // from the input frame's double registers.
- void CopyDoubleRegisters(FrameDescription* output_frame);
+ // Fill the given output frame's simd128 registers with the original values
+ // from the input frame's simd128 registers.
+ void CopySIMD128Registers(FrameDescription* output_frame);
// Determines whether the input frame contains alignment padding by looking
// at the dynamic alignment state slot inside the frame.
bool HasAlignmentPadding(JSFunction* function);
- // Select the version of NotifyStubFailure builtin that either saves or
- // doesn't save the double registers depending on CPU features.
- Code* NotifyStubFailureBuiltin();
-
Isolate* isolate_;
JSFunction* function_;
Code* compiled_code_;
List<Object*> deferred_objects_tagged_values_;
List<HeapNumberMaterializationDescriptor<int> >
deferred_objects_double_values_;
+ List<SIMD128MaterializationDescriptor<int> >
+ deferred_objects_float32x4_values_;
+ List<SIMD128MaterializationDescriptor<int> >
+ deferred_objects_float64x2_values_;
+ List<SIMD128MaterializationDescriptor<int> >
+ deferred_objects_int32x4_values_;
List<ObjectMaterializationDescriptor> deferred_objects_;
List<HeapNumberMaterializationDescriptor<Address> > deferred_heap_numbers_;
+ List<SIMD128MaterializationDescriptor<Address> > deferred_float32x4s_;
+ List<SIMD128MaterializationDescriptor<Address> > deferred_float64x2s_;
+ List<SIMD128MaterializationDescriptor<Address> > deferred_int32x4s_;
+
+ // Key for lookup of previously materialized objects.
+ Address stack_fp_;
+ Handle<FixedArray> previously_materialized_objects_;
+ int prev_materialized_count_;
// Output frame information. Only used during heap object materialization.
List<Handle<JSFunction> > jsframe_functions_;
}
uint32_t GetFrameSize() const {
- ASSERT(static_cast<uint32_t>(frame_size_) == frame_size_);
+ DCHECK(static_cast<uint32_t>(frame_size_) == frame_size_);
return static_cast<uint32_t>(frame_size_);
}
return read_double_value(reinterpret_cast<Address>(ptr));
}
+ simd128_value_t GetSIMD128FrameSlot(unsigned offset) {
+ intptr_t* ptr = GetFrameSlotPointer(offset);
+ return read_simd128_value(reinterpret_cast<Address>(ptr));
+ }
+
void SetFrameSlot(unsigned offset, intptr_t value) {
*GetFrameSlotPointer(offset) = value;
}
void SetCallerFp(unsigned offset, intptr_t value);
+ void SetCallerConstantPool(unsigned offset, intptr_t value);
+
intptr_t GetRegister(unsigned n) const {
#if DEBUG
- // This convoluted ASSERT is needed to work around a gcc problem that
+ // This convoluted DCHECK is needed to work around a gcc problem that
// improperly detects an array bounds overflow in optimized debug builds
- // when using a plain ASSERT.
- if (n >= ARRAY_SIZE(registers_)) {
- ASSERT(false);
+ // when using a plain DCHECK.
+ if (n >= arraysize(registers_)) {
+ DCHECK(false);
return 0;
}
#endif
return registers_[n];
}
- double GetDoubleRegister(unsigned n) const {
- ASSERT(n < ARRAY_SIZE(double_registers_));
- return double_registers_[n];
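+ // Defined out of line: double registers are now backed by the
+ // simd128_registers_ array (see below).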
+ double GetDoubleRegister(unsigned n) const;
+
+ simd128_value_t GetSIMD128Register(unsigned n) const {
+ DCHECK(n < arraysize(simd128_registers_));
+ return simd128_registers_[n];
}
void SetRegister(unsigned n, intptr_t value) {
- ASSERT(n < ARRAY_SIZE(registers_));
+ DCHECK(n < arraysize(registers_));
registers_[n] = value;
}
- void SetDoubleRegister(unsigned n, double value) {
- ASSERT(n < ARRAY_SIZE(double_registers_));
- double_registers_[n] = value;
+ void SetDoubleRegister(unsigned n, double value);
+
+ void SetSIMD128Register(unsigned n, simd128_value_t value) {
+ DCHECK(n < arraysize(simd128_registers_));
+ simd128_registers_[n] = value;
}
intptr_t GetTop() const { return top_; }
return OFFSET_OF(FrameDescription, registers_);
}
- static int double_registers_offset() {
- return OFFSET_OF(FrameDescription, double_registers_);
+ static int simd128_registers_offset() {
+ return OFFSET_OF(FrameDescription, simd128_registers_);
}
static int frame_size_offset() {
uintptr_t frame_size_; // Number of bytes.
JSFunction* function_;
intptr_t registers_[Register::kNumRegisters];
- double double_registers_[DoubleRegister::kMaxNumRegisters];
+ simd128_value_t simd128_registers_[SIMD128Register::kMaxNumRegisters];
intptr_t top_;
intptr_t pc_;
intptr_t fp_;
intptr_t frame_content_[1];
intptr_t* GetFrameSlotPointer(unsigned offset) {
- ASSERT(offset < frame_size_);
+ DCHECK(offset < frame_size_);
return reinterpret_cast<intptr_t*>(
reinterpret_cast<Address>(this) + frame_content_offset() + offset);
}
explicit DeoptimizerData(MemoryAllocator* allocator);
~DeoptimizerData();
-#ifdef ENABLE_DEBUGGER_SUPPORT
void Iterate(ObjectVisitor* v);
-#endif
private:
MemoryAllocator* allocator_;
int deopt_entry_code_entries_[Deoptimizer::kBailoutTypesWithCodeEntry];
MemoryChunk* deopt_entry_code_[Deoptimizer::kBailoutTypesWithCodeEntry];
-#ifdef ENABLE_DEBUGGER_SUPPORT
DeoptimizedFrameInfo* deoptimized_frame_info_;
-#endif
Deoptimizer* current_;
public:
TranslationIterator(ByteArray* buffer, int index)
: buffer_(buffer), index_(index) {
- ASSERT(index >= 0 && index < buffer->length());
+ DCHECK(index >= 0 && index < buffer->length());
}
int32_t Next();
V(INT32_REGISTER) \
V(UINT32_REGISTER) \
V(DOUBLE_REGISTER) \
+ V(FLOAT32x4_REGISTER) \
+ V(FLOAT64x2_REGISTER) \
+ V(INT32x4_REGISTER) \
V(STACK_SLOT) \
V(INT32_STACK_SLOT) \
V(UINT32_STACK_SLOT) \
V(DOUBLE_STACK_SLOT) \
+ V(FLOAT32x4_STACK_SLOT) \
+ V(FLOAT64x2_STACK_SLOT) \
+ V(INT32x4_STACK_SLOT) \
V(LITERAL)
void StoreInt32Register(Register reg);
void StoreUint32Register(Register reg);
void StoreDoubleRegister(DoubleRegister reg);
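+ // opcode selects the concrete 128-bit type (FLOAT32x4_REGISTER,
+ // FLOAT64x2_REGISTER or INT32x4_REGISTER); likewise for the stack
+ // slot variant below.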
+ void StoreSIMD128Register(SIMD128Register reg, Opcode opcode);
void StoreStackSlot(int index);
void StoreInt32StackSlot(int index);
void StoreUint32StackSlot(int index);
void StoreDoubleStackSlot(int index);
+ void StoreSIMD128StackSlot(int index, Opcode opcode);
void StoreLiteral(int literal_id);
void StoreArgumentsObject(bool args_known, int args_index, int args_length);
INT32,
UINT32,
DOUBLE,
- LITERAL
+ FLOAT32x4,
+ FLOAT64x2,
+ INT32x4,
+ LITERAL,
+ DEFERRED_OBJECT,  // Object captured by the escape analysis.
+                   // The number of nested objects can be obtained
+                   // with the GetChildrenCount() method (the
+                   // SlotRefs of the nested objects follow this
+                   // SlotRef in depth-first order).
+ DUPLICATE_OBJECT, // Duplicated object of a deferred object.
+ ARGUMENTS_OBJECT  // Arguments object - only used to keep indexing
+                   // in sync; it should not be materialized.
};
SlotRef()
SlotRef(Isolate* isolate, Object* literal)
: literal_(literal, isolate), representation_(LITERAL) { }
- Handle<Object> GetValue(Isolate* isolate) {
- switch (representation_) {
- case TAGGED:
- return Handle<Object>(Memory::Object_at(addr_), isolate);
-
- case INT32: {
- int value = Memory::int32_at(addr_);
- if (Smi::IsValid(value)) {
- return Handle<Object>(Smi::FromInt(value), isolate);
- } else {
- return isolate->factory()->NewNumberFromInt(value);
- }
- }
-
- case UINT32: {
- uint32_t value = Memory::uint32_at(addr_);
- if (value <= static_cast<uint32_t>(Smi::kMaxValue)) {
- return Handle<Object>(Smi::FromInt(static_cast<int>(value)), isolate);
- } else {
- return isolate->factory()->NewNumber(static_cast<double>(value));
- }
- }
-
- case DOUBLE: {
- double value = read_double_value(addr_);
- return isolate->factory()->NewNumber(value);
- }
-
- case LITERAL:
- return literal_;
-
- default:
- UNREACHABLE();
- return Handle<Object>::null();
+ static SlotRef NewArgumentsObject(int length) {
+ SlotRef slot;
+ slot.representation_ = ARGUMENTS_OBJECT;
+ slot.deferred_object_length_ = length;
+ return slot;
+ }
+
+ static SlotRef NewDeferredObject(int length) {
+ SlotRef slot;
+ slot.representation_ = DEFERRED_OBJECT;
+ slot.deferred_object_length_ = length;
+ return slot;
+ }
+
+ SlotRepresentation Representation() { return representation_; }
+
+ static SlotRef NewDuplicateObject(int id) {
+ SlotRef slot;
+ slot.representation_ = DUPLICATE_OBJECT;
+ slot.duplicate_object_id_ = id;
+ return slot;
+ }
+
+ int GetChildrenCount() {
+ if (representation_ == DEFERRED_OBJECT ||
+ representation_ == ARGUMENTS_OBJECT) {
+ return deferred_object_length_;
+ } else {
+ return 0;
}
}
- static Vector<SlotRef> ComputeSlotMappingForArguments(
- JavaScriptFrame* frame,
- int inlined_frame_index,
- int formal_parameter_count);
+ int DuplicateObjectId() { return duplicate_object_id_; }
+
+ Handle<Object> GetValue(Isolate* isolate);
private:
Address addr_;
Handle<Object> literal_;
SlotRepresentation representation_;
+ int deferred_object_length_;
+ int duplicate_object_id_;
+};
+
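+// Walks the translation of an inlined frame and rebuilds the values of its
+// slots, materializing deferred and duplicated objects on demand. Callers
+// drive it through Prepare()/GetNext()/Finish().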
+class SlotRefValueBuilder BASE_EMBEDDED {
+ public:
+ SlotRefValueBuilder(
+ JavaScriptFrame* frame,
+ int inlined_frame_index,
+ int formal_parameter_count);
+
+ void Prepare(Isolate* isolate);
+ Handle<Object> GetNext(Isolate* isolate, int level);
+ void Finish(Isolate* isolate);
+
+ int args_length() { return args_length_; }
+
+ private:
+ List<Handle<Object> > materialized_objects_;
+ Handle<FixedArray> previously_materialized_objects_;
+ int prev_materialized_count_;
+ Address stack_frame_id_;
+ List<SlotRef> slot_refs_;
+ int current_slot_;
+ int args_length_;
+ int first_slot_index_;
+
+ static SlotRef ComputeSlotForNextArgument(
+ Translation::Opcode opcode,
+ TranslationIterator* iterator,
+ DeoptimizationInputData* data,
+ JavaScriptFrame* frame);
+
+ Handle<Object> GetPreviouslyMaterialized(Isolate* isolate, int length);
static Address SlotAddress(JavaScriptFrame* frame, int slot_index) {
if (slot_index >= 0) {
}
}
- static SlotRef ComputeSlotForNextArgument(TranslationIterator* iterator,
- DeoptimizationInputData* data,
- JavaScriptFrame* frame);
+ Handle<Object> GetDeferredObject(Isolate* isolate);
+};
- static void ComputeSlotsForArguments(
- Vector<SlotRef>* args_slots,
- TranslationIterator* iterator,
- DeoptimizationInputData* data,
- JavaScriptFrame* frame);
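+// Maps the frame pointer of an optimized frame to the FixedArray of objects
+// materialized for it, so that repeated materializations for the same frame
+// observe the same objects.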
+class MaterializedObjectStore {
+ public:
+ explicit MaterializedObjectStore(Isolate* isolate) : isolate_(isolate) {
+ }
+
+ Handle<FixedArray> Get(Address fp);
+ void Set(Address fp, Handle<FixedArray> materialized_objects);
+ void Remove(Address fp);
+
+ private:
+ Isolate* isolate() { return isolate_; }
+ Handle<FixedArray> GetStackEntries();
+ Handle<FixedArray> EnsureStackEntries(int size);
+
+ int StackIdToIndex(Address fp);
+
+ Isolate* isolate_;
+ List<Address> frame_fps_;
};
-#ifdef ENABLE_DEBUGGER_SUPPORT
// Class used to represent an unoptimized frame when the debugger
// needs to inspect a frame that is part of an optimized frame. The
// internally used FrameDescription objects are not GC safe so for use
return function_;
}
+ // Get the frame context.
+ Object* GetContext() { return context_; }
+
// Check if this frame is preceded by construct stub frame. The bottom-most
// inlined frame might still be called by an uninlined construct stub.
bool HasConstructStub() {
// Get an incoming argument.
Object* GetParameter(int index) {
- ASSERT(0 <= index && index < parameters_count());
+ DCHECK(0 <= index && index < parameters_count());
return parameters_[index];
}
// Get an expression from the expression stack.
Object* GetExpression(int index) {
- ASSERT(0 <= index && index < expression_count());
+ DCHECK(0 <= index && index < expression_count());
return expression_stack_[index];
}
private:
// Set an incoming argument.
void SetParameter(int index, Object* obj) {
- ASSERT(0 <= index && index < parameters_count());
+ DCHECK(0 <= index && index < parameters_count());
parameters_[index] = obj;
}
// Set an expression on the expression stack.
void SetExpression(int index, Object* obj) {
- ASSERT(0 <= index && index < expression_count());
+ DCHECK(0 <= index && index < expression_count());
expression_stack_[index] = obj;
}
JSFunction* function_;
+ Object* context_;
bool has_construct_stub_;
int parameters_count_;
int expression_count_;
friend class Deoptimizer;
};
-#endif
} } // namespace v8::internal