// Reads a double from |p|, which may not be 8-byte aligned.
// memcpy is used instead of a direct load so the compiler never emits an
// instruction that requires natural alignment (e.g. mips ldc1 on a
// non-8-byte-aligned address); on targets where the plain load is safe it
// compiles down to the same instruction.
static inline double read_double_value(Address p) {
  double d;
  memcpy(&d, p, sizeof(d));
  return d;
}
+static inline simd128_value_t read_simd128_value(Address p) {
+ return *reinterpret_cast<simd128_value_t*>(p);
+}
class FrameDescription;
class TranslationIterator;
};
+template<typename T>
+class SIMD128MaterializationDescriptor BASE_EMBEDDED {
+ public:
+ SIMD128MaterializationDescriptor(T destination, simd128_value_t value)
+ : destination_(destination), value_(value) { }
+
+ T destination() const { return destination_; }
+ simd128_value_t value() const { return value_; }
+
+ private:
+ T destination_;
+ simd128_value_t value_;
+};
+
+
class ObjectMaterializationDescriptor BASE_EMBEDDED {
public:
ObjectMaterializationDescriptor(
static const int kBailoutTypesWithCodeEntry = SOFT + 1;
+ struct Reason {
+ Reason(int r, const char* m, const char* d)
+ : raw_position(r), mnemonic(m), detail(d) {}
+
+ bool operator==(const Reason& other) const {
+ return raw_position == other.raw_position &&
+ CStringEquals(mnemonic, other.mnemonic) &&
+ CStringEquals(detail, other.detail);
+ }
+
+ bool operator!=(const Reason& other) const { return !(*this == other); }
+
+ int raw_position;
+ const char* mnemonic;
+ const char* detail;
+ };
+
struct JumpTableEntry : public ZoneObject {
- inline JumpTableEntry(Address entry,
- Deoptimizer::BailoutType type,
- bool frame)
+ inline JumpTableEntry(Address entry, const Reason& the_reason,
+ Deoptimizer::BailoutType type, bool frame)
: label(),
address(entry),
+ reason(the_reason),
bailout_type(type),
- needs_frame(frame) { }
+ needs_frame(frame) {}
+
+ bool IsEquivalentTo(const JumpTableEntry& other) const {
+ return address == other.address && bailout_type == other.bailout_type &&
+ needs_frame == other.needs_frame &&
+ (!FLAG_trace_deopt || reason == other.reason);
+ }
+
Label label;
Address address;
+ Reason reason;
Deoptimizer::BailoutType bailout_type;
bool needs_frame;
};
void AddObjectDuplication(intptr_t slot, int object_index);
void AddObjectTaggedValue(intptr_t value);
void AddObjectDoubleValue(double value);
+ void AddObjectSIMD128Value(simd128_value_t value, int translation_opcode);
void AddDoubleValue(intptr_t slot_address, double value);
+ void AddSIMD128Value(intptr_t slot_address, simd128_value_t value,
+ int translation_opcode);
bool ArgumentsObjectIsAdapted(int object_index) {
ObjectMaterializationDescriptor desc = deferred_objects_.at(object_index);
// Fill the given output frame's registers to contain the failure handler
// address and the number of parameters for a stub failure trampoline.
void SetPlatformCompiledStubRegisters(FrameDescription* output_frame,
- CodeStubInterfaceDescriptor* desc);
+ CodeStubDescriptor* desc);
- // Fill the given output frame's double registers with the original values
- // from the input frame's double registers.
- void CopyDoubleRegisters(FrameDescription* output_frame);
+ // Fill the given output frame's simd128 registers with the original values
+ // from the input frame's simd128 registers.
+ void CopySIMD128Registers(FrameDescription* output_frame);
// Determines whether the input frame contains alignment padding by looking
// at the dynamic alignment state slot inside the frame.
List<Object*> deferred_objects_tagged_values_;
List<HeapNumberMaterializationDescriptor<int> >
deferred_objects_double_values_;
+ List<SIMD128MaterializationDescriptor<int> >
+ deferred_objects_float32x4_values_;
+ List<SIMD128MaterializationDescriptor<int> >
+ deferred_objects_float64x2_values_;
+ List<SIMD128MaterializationDescriptor<int> >
+ deferred_objects_int32x4_values_;
List<ObjectMaterializationDescriptor> deferred_objects_;
List<HeapNumberMaterializationDescriptor<Address> > deferred_heap_numbers_;
+ List<SIMD128MaterializationDescriptor<Address> > deferred_float32x4s_;
+ List<SIMD128MaterializationDescriptor<Address> > deferred_float64x2s_;
+ List<SIMD128MaterializationDescriptor<Address> > deferred_int32x4s_;
// Key for lookup of previously materialized objects
Address stack_fp_;
}
uint32_t GetFrameSize() const {
- ASSERT(static_cast<uint32_t>(frame_size_) == frame_size_);
+ DCHECK(static_cast<uint32_t>(frame_size_) == frame_size_);
return static_cast<uint32_t>(frame_size_);
}
return read_double_value(reinterpret_cast<Address>(ptr));
}
+ simd128_value_t GetSIMD128FrameSlot(unsigned offset) {
+ intptr_t* ptr = GetFrameSlotPointer(offset);
+ return read_simd128_value(reinterpret_cast<Address>(ptr));
+ }
+
void SetFrameSlot(unsigned offset, intptr_t value) {
*GetFrameSlotPointer(offset) = value;
}
intptr_t GetRegister(unsigned n) const {
#if DEBUG
- // This convoluted ASSERT is needed to work around a gcc problem that
+ // This convoluted DCHECK is needed to work around a gcc problem that
// improperly detects an array bounds overflow in optimized debug builds
- // when using a plain ASSERT.
- if (n >= ARRAY_SIZE(registers_)) {
- ASSERT(false);
+ // when using a plain DCHECK.
+ if (n >= arraysize(registers_)) {
+ DCHECK(false);
return 0;
}
#endif
return registers_[n];
}
- double GetDoubleRegister(unsigned n) const {
- ASSERT(n < ARRAY_SIZE(double_registers_));
- return double_registers_[n];
+ double GetDoubleRegister(unsigned n) const;
+
+ simd128_value_t GetSIMD128Register(unsigned n) const {
+ DCHECK(n < arraysize(simd128_registers_));
+ return simd128_registers_[n];
}
void SetRegister(unsigned n, intptr_t value) {
- ASSERT(n < ARRAY_SIZE(registers_));
+ DCHECK(n < arraysize(registers_));
registers_[n] = value;
}
- void SetDoubleRegister(unsigned n, double value) {
- ASSERT(n < ARRAY_SIZE(double_registers_));
- double_registers_[n] = value;
+ void SetDoubleRegister(unsigned n, double value);
+
+ void SetSIMD128Register(unsigned n, simd128_value_t value) {
+ DCHECK(n < arraysize(simd128_registers_));
+ simd128_registers_[n] = value;
}
intptr_t GetTop() const { return top_; }
return OFFSET_OF(FrameDescription, registers_);
}
- static int double_registers_offset() {
- return OFFSET_OF(FrameDescription, double_registers_);
+ static int simd128_registers_offset() {
+ return OFFSET_OF(FrameDescription, simd128_registers_);
}
static int frame_size_offset() {
uintptr_t frame_size_; // Number of bytes.
JSFunction* function_;
intptr_t registers_[Register::kNumRegisters];
- double double_registers_[DoubleRegister::kMaxNumRegisters];
+ simd128_value_t simd128_registers_[SIMD128Register::kMaxNumRegisters];
intptr_t top_;
intptr_t pc_;
intptr_t fp_;
intptr_t frame_content_[1];
intptr_t* GetFrameSlotPointer(unsigned offset) {
- ASSERT(offset < frame_size_);
+ DCHECK(offset < frame_size_);
return reinterpret_cast<intptr_t*>(
reinterpret_cast<Address>(this) + frame_content_offset() + offset);
}
public:
TranslationIterator(ByteArray* buffer, int index)
: buffer_(buffer), index_(index) {
- ASSERT(index >= 0 && index < buffer->length());
+ DCHECK(index >= 0 && index < buffer->length());
}
int32_t Next();
V(INT32_REGISTER) \
V(UINT32_REGISTER) \
V(DOUBLE_REGISTER) \
+ V(FLOAT32x4_REGISTER) \
+ V(FLOAT64x2_REGISTER) \
+ V(INT32x4_REGISTER) \
V(STACK_SLOT) \
V(INT32_STACK_SLOT) \
V(UINT32_STACK_SLOT) \
V(DOUBLE_STACK_SLOT) \
+ V(FLOAT32x4_STACK_SLOT) \
+ V(FLOAT64x2_STACK_SLOT) \
+ V(INT32x4_STACK_SLOT) \
V(LITERAL)
void StoreInt32Register(Register reg);
void StoreUint32Register(Register reg);
void StoreDoubleRegister(DoubleRegister reg);
+ void StoreSIMD128Register(SIMD128Register reg, Opcode opcode);
void StoreStackSlot(int index);
void StoreInt32StackSlot(int index);
void StoreUint32StackSlot(int index);
void StoreDoubleStackSlot(int index);
+ void StoreSIMD128StackSlot(int index, Opcode opcode);
void StoreLiteral(int literal_id);
void StoreArgumentsObject(bool args_known, int args_index, int args_length);
INT32,
UINT32,
DOUBLE,
+ FLOAT32x4,
+ FLOAT64x2,
+ INT32x4,
LITERAL,
DEFERRED_OBJECT, // Object captured by the escape analysis.
// The number of nested objects can be obtained
return function_;
}
+ // Get the frame context.
+ Object* GetContext() { return context_; }
+
// Check if this frame is preceded by construct stub frame. The bottom-most
// inlined frame might still be called by an uninlined construct stub.
bool HasConstructStub() {
// Get an incoming argument.
Object* GetParameter(int index) {
- ASSERT(0 <= index && index < parameters_count());
+ DCHECK(0 <= index && index < parameters_count());
return parameters_[index];
}
// Get an expression from the expression stack.
Object* GetExpression(int index) {
- ASSERT(0 <= index && index < expression_count());
+ DCHECK(0 <= index && index < expression_count());
return expression_stack_[index];
}
private:
// Set an incoming argument.
void SetParameter(int index, Object* obj) {
- ASSERT(0 <= index && index < parameters_count());
+ DCHECK(0 <= index && index < parameters_count());
parameters_[index] = obj;
}
// Set an expression on the expression stack.
void SetExpression(int index, Object* obj) {
- ASSERT(0 <= index && index < expression_count());
+ DCHECK(0 <= index && index < expression_count());
expression_stack_[index] = obj;
}
JSFunction* function_;
+ Object* context_;
bool has_construct_stub_;
int parameters_count_;
int expression_count_;