// The original source code covered by the above license above has been
// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
#ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
namespace internal {
// -----------------------------------------------------------------------------
-// Operand and MemOperand
+// Operand and MemOperand.
Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
rm_ = no_reg;
// -----------------------------------------------------------------------------
-// RelocInfo
+// RelocInfo.
void RelocInfo::apply(intptr_t delta) {
// On MIPS we do not use pc relative addressing, so we don't need to patch the
// RelocInfo is needed when pointer must be updated/serialized, such as
// UpdatingVisitor in mark-compact.cc or Serializer in serialize.cc.
// It is ignored by visitors that do not need it.
- // Commenting out, to simplify arch-independednt changes.
+ // TODO(mips): Commenting out, to simplify arch-independent changes.
// GC won't work like this, but this commit is for asm/disasm/sim.
// visitor->VisitPointer(target_object_address(), this);
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
+ } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
+ visitor->VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
// RelocInfo is needed when external-references must be serialized by
// Serializer Visitor in serialize.cc. It is ignored by visitors that
// do not need it.
- // Commenting out, to simplify arch-independednt changes.
+ // TODO(mips): Commenting out, to simplify arch-independent changes.
// Serializer won't work like this, but this commit is for asm/disasm/sim.
// visitor->VisitExternalReference(target_reference_address(), this);
#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
+ (RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
Isolate::Current()->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitPointer(heap, target_object_address());
} else if (RelocInfo::IsCodeTarget(mode)) {
- StaticVisitor::VisitCodeTarget(this);
+ StaticVisitor::VisitCodeTarget(heap, this);
+ } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
+ StaticVisitor::VisitGlobalPropertyCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(target_reference_address());
#ifdef ENABLE_DEBUGGER_SUPPORT
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()))) {
- StaticVisitor::VisitDebugTarget(this);
+ StaticVisitor::VisitDebugTarget(heap, this);
#endif
} else if (mode == RelocInfo::RUNTIME_ENTRY) {
StaticVisitor::VisitRuntimeEntry(this);
// -----------------------------------------------------------------------------
-// Assembler
+// Assembler.
void Assembler::CheckBuffer() {
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
#include "v8.h"
namespace v8 {
namespace internal {
-CpuFeatures::CpuFeatures()
- : supported_(0),
- enabled_(0),
- found_by_runtime_probing_(0) {
-}
+#ifdef DEBUG
+bool CpuFeatures::initialized_ = false;
+#endif
+unsigned CpuFeatures::supported_ = 0;
+unsigned CpuFeatures::found_by_runtime_probing_ = 0;
-void CpuFeatures::Probe(bool portable) {
+void CpuFeatures::Probe() {
+ ASSERT(!initialized_);
+#ifdef DEBUG
+ initialized_ = true;
+#endif
// If the compiler is allowed to use fpu then we can use fpu too in our
// code generation.
#if !defined(__mips__)
supported_ |= 1u << FPU;
}
#else
- if (portable && Serializer::enabled()) {
+ if (Serializer::enabled()) {
supported_ |= OS::CpuFeaturesImpliedByPlatform();
return; // No features if we might serialize.
}
supported_ |= 1u << FPU;
found_by_runtime_probing_ |= 1u << FPU;
}
-
- if (!portable) found_by_runtime_probing_ = 0;
#endif
}
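
A minimal standalone sketch of the feature-bitmask scheme that Probe() and IsSupported() share, with each CpuFeature used as a bit index (simplified names, not V8's actual declarations):

#include <cassert>

enum CpuFeature { FPU = 0 };  // Bit index, mirroring V8's enum.
static unsigned supported = 0;

static bool IsSupported(CpuFeature f) {
  return (supported & (1u << f)) != 0;
}

int main() {
  supported |= 1u << FPU;  // What Probe() records on an FPU-capable host.
  assert(IsSupported(FPU));
  return 0;
}
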
static const int kMinimalBufferSize = 4 * KB;
-Assembler::Assembler(void* buffer, int buffer_size)
- : AssemblerBase(Isolate::Current()),
+Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
+ : AssemblerBase(arg_isolate),
positions_recorder_(this),
- allow_peephole_optimization_(false) {
- // BUG(3245989): disable peephole optimization if crankshaft is enabled.
+ allow_peephole_optimization_(false),
+ emit_debug_code_(FLAG_debug_code) {
allow_peephole_optimization_ = FLAG_peephole_optimization;
if (buffer == NULL) {
// Do our own buffer management.
no_trampoline_pool_before_ = 0;
trampoline_pool_blocked_nesting_ = 0;
next_buffer_check_ = kMaxBranchOffset - kTrampolineSize;
+ internal_trampoline_exception_ = false;
+
+ ast_id_for_reloc_info_ = kNoASTId;
}
Assembler::~Assembler() {
if (own_buffer_) {
if (isolate()->assembler_spare_buffer() == NULL &&
- buffer_size_ == kMinimalBufferSize) {
+ buffer_size_ == kMinimalBufferSize) {
isolate()->set_assembler_spare_buffer(buffer_);
} else {
DeleteArray(buffer_);
}
-Register Assembler::GetRt(Instr instr) {
+Register Assembler::GetRtReg(Instr instr) {
Register rt;
- rt.code_ = (instr & kRtMask) >> kRtShift;
+ rt.code_ = (instr & kRtFieldMask) >> kRtShift;
return rt;
}
+Register Assembler::GetRsReg(Instr instr) {
+ Register rs;
+ rs.code_ = (instr & kRsFieldMask) >> kRsShift;
+ return rs;
+}
+
+
+Register Assembler::GetRdReg(Instr instr) {
+ Register rd;
+ rd.code_ = (instr & kRdFieldMask) >> kRdShift;
+ return rd;
+}
+
+
+uint32_t Assembler::GetRt(Instr instr) {
+ return (instr & kRtFieldMask) >> kRtShift;
+}
+
+
+uint32_t Assembler::GetRtField(Instr instr) {
+ return instr & kRtFieldMask;
+}
+
+
+uint32_t Assembler::GetRs(Instr instr) {
+ return (instr & kRsFieldMask) >> kRsShift;
+}
+
+
+uint32_t Assembler::GetRsField(Instr instr) {
+ return instr & kRsFieldMask;
+}
+
+
+uint32_t Assembler::GetRd(Instr instr) {
+ return (instr & kRdFieldMask) >> kRdShift;
+}
+
+
+uint32_t Assembler::GetRdField(Instr instr) {
+ return instr & kRdFieldMask;
+}
+
+
+uint32_t Assembler::GetSa(Instr instr) {
+ return (instr & kSaFieldMask) >> kSaShift;
+}
+
+
+uint32_t Assembler::GetSaField(Instr instr) {
+ return instr & kSaFieldMask;
+}
+
+
+uint32_t Assembler::GetOpcodeField(Instr instr) {
+ return instr & kOpcodeMask;
+}
+
+
+uint32_t Assembler::GetImmediate16(Instr instr) {
+ return instr & kImm16Mask;
+}
+
+
+uint32_t Assembler::GetLabelConst(Instr instr) {
+ return instr & ~kImm16Mask;
+}
+
+
bool Assembler::IsPop(Instr instr) {
return (instr & ~kRtMask) == kPopRegPattern;
}
bool Assembler::IsBranch(Instr instr) {
- uint32_t opcode = ((instr & kOpcodeMask));
- uint32_t rt_field = ((instr & kRtFieldMask));
- uint32_t rs_field = ((instr & kRsFieldMask));
- uint32_t label_constant = (instr & ~kImm16Mask);
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t rt_field = GetRtField(instr);
+ uint32_t rs_field = GetRsField(instr);
+ uint32_t label_constant = GetLabelConst(instr);
// Checks if the instruction is a branch.
return opcode == BEQ ||
opcode == BNE ||
opcode == BEQL ||
opcode == BNEL ||
opcode == BLEZL ||
- opcode == BGTZL||
+ opcode == BGTZL ||
(opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
rt_field == BLTZAL || rt_field == BGEZAL)) ||
(opcode == COP1 && rs_field == BC1) || // Coprocessor branch.
}
+bool Assembler::IsBeq(Instr instr) {
+ return GetOpcodeField(instr) == BEQ;
+}
+
+
+bool Assembler::IsBne(Instr instr) {
+ return GetOpcodeField(instr) == BNE;
+}
+
+
bool Assembler::IsNop(Instr instr, unsigned int type) {
// See Assembler::nop(type).
ASSERT(type < 32);
- uint32_t opcode = ((instr & kOpcodeMask));
- uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
- uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
- uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t rt = GetRt(instr);
+ uint32_t rs = GetRs(instr);
+ uint32_t sa = GetSa(instr);
// nop(type) == sll(zero_reg, zero_reg, type);
// Technically all these values will be 0 but
}
+bool Assembler::IsAndImmediate(Instr instr) {
+ return GetOpcodeField(instr) == ANDI;
+}
+
+
int Assembler::target_at(int32_t pos) {
Instr instr = instr_at(pos);
if ((instr & ~kImm16Mask) == 0) {
if (dist > kMaxBranchOffset) {
do {
int32_t trampoline_pos = get_trampoline_entry(fixup_pos);
+ if (kInvalidSlotPos == trampoline_pos) {
+ // Internal error.
+ return;
+ }
ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
target_at_put(fixup_pos, trampoline_pos);
fixup_pos = trampoline_pos;
} else if (dist < -kMaxBranchOffset) {
do {
int32_t trampoline_pos = get_trampoline_entry(fixup_pos, false);
+ if (kInvalidSlotPos == trampoline_pos) {
+ // Internal error.
+ return;
+ }
ASSERT((trampoline_pos - fixup_pos) >= -kMaxBranchOffset);
target_at_put(fixup_pos, trampoline_pos);
fixup_pos = trampoline_pos;
FPURegister fd,
SecondaryField func) {
ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
- ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+ ASSERT(CpuFeatures::IsEnabled(FPU));
Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
| (fd.code() << kFdShift) | func;
emit(instr);
FPURegister fd,
SecondaryField func) {
ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
- ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+ ASSERT(CpuFeatures::IsEnabled(FPU));
Instr instr = opcode | fmt | (rt.code() << kRtShift)
| (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
emit(instr);
FPUControlRegister fs,
SecondaryField func) {
ASSERT(fs.is_valid() && rt.is_valid());
- ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+ ASSERT(CpuFeatures::IsEnabled(FPU));
Instr instr =
opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
emit(instr);
FPURegister ft,
int32_t j) {
ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
- ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+ ASSERT(CpuFeatures::IsEnabled(FPU));
Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
| (j & kImm16Mask);
emit(instr);
// Returns the next free trampoline entry from the next trampoline pool.
int32_t Assembler::get_trampoline_entry(int32_t pos, bool next_pool) {
int trampoline_count = trampolines_.length();
- int32_t trampoline_entry = 0;
+ int32_t trampoline_entry = kInvalidSlotPos;
ASSERT(trampoline_count > 0);
- if (next_pool) {
- for (int i = 0; i < trampoline_count; i++) {
- if (trampolines_[i].start() > pos) {
- trampoline_entry = trampolines_[i].take_slot();
- break;
+ if (!internal_trampoline_exception_) {
+ if (next_pool) {
+ for (int i = 0; i < trampoline_count; i++) {
+ if (trampolines_[i].start() > pos) {
+ trampoline_entry = trampolines_[i].take_slot();
+ break;
+ }
}
- }
- } else { // Caller needs a trampoline entry from the previous pool.
- for (int i = trampoline_count-1; i >= 0; i--) {
- if (trampolines_[i].end() < pos) {
- trampoline_entry = trampolines_[i].take_slot();
- break;
+ } else { // Caller needs a trampoline entry from the previous pool.
+ for (int i = trampoline_count-1; i >= 0; i--) {
+ if (trampolines_[i].end() < pos) {
+ trampoline_entry = trampolines_[i].take_slot();
+ break;
+ }
}
}
+ if (kInvalidSlotPos == trampoline_entry) {
+ internal_trampoline_exception_ = true;
+ }
}
return trampoline_entry;
}
if (dist > kMaxBranchOffset) {
do {
int32_t trampoline_pos = get_trampoline_entry(target_pos);
+ if (kInvalidSlotPos == trampoline_pos) {
+ // Internal error.
+ return 0;
+ }
ASSERT((trampoline_pos - target_pos) > 0);
ASSERT((trampoline_pos - target_pos) <= kMaxBranchOffset);
target_at_put(trampoline_pos, target_pos);
} else if (dist < -kMaxBranchOffset) {
do {
int32_t trampoline_pos = get_trampoline_entry(target_pos, false);
+ if (kInvalidSlotPos == trampoline_pos) {
+ // Internal error.
+ return 0;
+ }
ASSERT((target_pos - trampoline_pos) > 0);
ASSERT((target_pos - trampoline_pos) <= kMaxBranchOffset);
target_at_put(trampoline_pos, target_pos);
// we delete both the push & pop and insert a register move.
// push ry, pop rx --> mov rx, ry.
Register reg_pushed, reg_popped;
- reg_pushed = GetRt(push_instr);
- reg_popped = GetRt(pop_instr);
+ reg_pushed = GetRtReg(push_instr);
+ reg_popped = GetRtReg(pop_instr);
pc_ -= 4 * kInstrSize;
// Insert a mov instruction, which is better than a pair of push & pop.
or_(reg_popped, reg_pushed, zero_reg);
if ((mem_write_instr & kRtMask) ==
(mem_read_instr & kRtMask)) {
// Pattern: push & pop from/to same register,
- // with a fp+offset lw in between.
+ // with a fp + offset lw in between.
//
// The following:
// addiu sp, sp, -4
emit(lw_instr);
}
if (FLAG_print_peephole_optimization) {
- PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset());
+ PrintF("%x push/pop -dead ldr fp + offset in middle\n",
+ pc_offset());
}
} else {
// Pattern: push & pop from/to different registers
Register reg_pushed, reg_popped;
if ((mem_read_instr & kRtMask) == (lw_instr & kRtMask)) {
- reg_pushed = GetRt(mem_write_instr);
- reg_popped = GetRt(mem_read_instr);
+ reg_pushed = GetRtReg(mem_write_instr);
+ reg_popped = GetRtReg(mem_read_instr);
pc_ -= 5 * kInstrSize;
or_(reg_popped, reg_pushed, zero_reg); // Move instruction.
} else if ((mem_write_instr & kRtMask)
!= (lw_instr & kRtMask)) {
- reg_pushed = GetRt(mem_write_instr);
- reg_popped = GetRt(mem_read_instr);
+ reg_pushed = GetRtReg(mem_write_instr);
+ reg_popped = GetRtReg(mem_read_instr);
pc_ -= 5 * kInstrSize;
emit(lw_instr);
or_(reg_popped, reg_pushed, zero_reg); // Move instruction.
!= (lw_instr & kRtMask)) ||
((mem_write_instr & kRtMask)
== (lw_instr & kRtMask)) ) {
- reg_pushed = GetRt(mem_write_instr);
- reg_popped = GetRt(mem_read_instr);
+ reg_pushed = GetRtReg(mem_write_instr);
+ reg_popped = GetRtReg(mem_read_instr);
pc_ -= 5 * kInstrSize;
or_(reg_popped, reg_pushed, zero_reg); // Move instruction.
emit(lw_instr);
// mov ry, rx
Register reg_stored, reg_loaded;
- reg_stored = GetRt(sw_instr);
- reg_loaded = GetRt(lw_instr);
+ reg_stored = GetRtReg(sw_instr);
+ reg_loaded = GetRtReg(lw_instr);
pc_ -= 1 * kInstrSize;
// Insert a mov instruction, which is better than lw.
or_(reg_loaded, reg_stored, zero_reg); // Move instruction.
void Assembler::movt(Register rd, Register rs, uint16_t cc) {
Register rt;
- rt.code_ = (cc & 0x0003) << 2 | 1;
+ rt.code_ = (cc & 0x0007) << 2 | 1;
GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
void Assembler::movf(Register rd, Register rs, uint16_t cc) {
Register rt;
- rt.code_ = (cc & 0x0003) << 2 | 0;
+ rt.code_ = (cc & 0x0007) << 2 | 0;
GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
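
The mask fix above matters because MIPS exposes eight FPU condition codes (cc 0-7, three bits), so masking with 0x0003 silently dropped the top bit. MOVCI packs cc into the upper bits of the rt field with the low bit selecting the sense; a quick host-side check of that packing, under the stated assumption about the field layout:

#include <cassert>
#include <cstdint>

int main() {
  // rt field for MOVCI: cc in bits 4:2, true/false sense in bit 0.
  for (uint16_t cc = 0; cc < 8; cc++) {
    uint32_t rt_movt = (cc & 0x0007) << 2 | 1;  // movt: "condition true".
    uint32_t rt_movf = (cc & 0x0007) << 2 | 0;  // movf: "condition false".
    assert((rt_movt >> 2) == cc && (rt_movt & 1) == 1);
    assert((rt_movf >> 2) == cc && (rt_movf & 1) == 0);
  }
  return 0;
}
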
// Conditions.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
FPURegister fs, FPURegister ft, uint16_t cc) {
- ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+ ASSERT(CpuFeatures::IsEnabled(FPU));
ASSERT(is_uint3(cc));
ASSERT((fmt & ~(31 << kRsShift)) == 0);
Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
void Assembler::fcmp(FPURegister src1, const double src2,
FPUCondition cond) {
- ASSERT(isolate()->cpu_features()->IsSupported(FPU));
+ ASSERT(CpuFeatures::IsEnabled(FPU));
ASSERT(src2 == 0.0);
mtc1(zero_reg, f14);
cvt_d_w(f14, f14);
void Assembler::bc1f(int16_t offset, uint16_t cc) {
- ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+ ASSERT(CpuFeatures::IsEnabled(FPU));
ASSERT(is_uint3(cc));
Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
emit(instr);
void Assembler::bc1t(int16_t offset, uint16_t cc) {
- ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+ ASSERT(CpuFeatures::IsEnabled(FPU));
ASSERT(is_uint3(cc));
Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
emit(instr);
return;
}
ASSERT(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
- reloc_info_writer.Write(&rinfo);
+ if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
+ ASSERT(ast_id_for_reloc_info_ != kNoASTId);
+ RelocInfo reloc_info_with_ast_id(pc_, rmode, ast_id_for_reloc_info_);
+ ast_id_for_reloc_info_ = kNoASTId;
+ reloc_info_writer.Write(&reloc_info_with_ast_id);
+ } else {
+ reloc_info_writer.Write(&rinfo);
+ }
}
}
Address Assembler::target_address_at(Address pc) {
Instr instr1 = instr_at(pc);
Instr instr2 = instr_at(pc + kInstrSize);
- // Check we have 2 instructions generated by li.
- ASSERT(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
- ((instr1 == nopInstr) && ((instr2 & kOpcodeMask) == ADDI ||
- (instr2 & kOpcodeMask) == ORI ||
- (instr2 & kOpcodeMask) == LUI)));
- // Interpret these 2 instructions.
- if (instr1 == nopInstr) {
- if ((instr2 & kOpcodeMask) == ADDI) {
- return reinterpret_cast<Address>(((instr2 & kImm16Mask) << 16) >> 16);
- } else if ((instr2 & kOpcodeMask) == ORI) {
- return reinterpret_cast<Address>(instr2 & kImm16Mask);
- } else if ((instr2 & kOpcodeMask) == LUI) {
- return reinterpret_cast<Address>((instr2 & kImm16Mask) << 16);
- }
- } else if ((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) {
- // 32 bit value.
+  // Interpret 2 instructions generated by li: lui/ori.
+ if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) {
+ // Assemble the 32 bit value.
return reinterpret_cast<Address>(
- (instr1 & kImm16Mask) << 16 | (instr2 & kImm16Mask));
+ (GetImmediate16(instr1) << 16) | GetImmediate16(instr2));
}
- // We should never get here.
+ // We should never get here, force a bad address if we do.
UNREACHABLE();
return (Address)0x0;
}
void Assembler::set_target_address_at(Address pc, Address target) {
- // On MIPS we need to patch the code to generate.
+ // On MIPS we patch the address into lui/ori instruction pair.
- // First check we have a li.
+ // First check we have an li (lui/ori pair).
Instr instr2 = instr_at(pc + kInstrSize);
#ifdef DEBUG
Instr instr1 = instr_at(pc);
// Check we have indeed the result from a li with MustUseReg true.
- CHECK(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
- ((instr1 == 0) && ((instr2 & kOpcodeMask)== ADDIU ||
- (instr2 & kOpcodeMask)== ORI ||
- (instr2 & kOpcodeMask)== LUI)));
+ CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI));
#endif
- uint32_t rt_code = (instr2 & kRtFieldMask);
+ uint32_t rt_code = GetRtField(instr2);
uint32_t* p = reinterpret_cast<uint32_t*>(pc);
uint32_t itarget = reinterpret_cast<uint32_t>(target);
- if (is_int16(itarget)) {
- // nop.
- // addiu rt zero_reg j.
- *p = nopInstr;
- *(p+1) = ADDIU | rt_code | (itarget & kImm16Mask);
- } else if (!(itarget & kHiMask)) {
- // nop.
- // ori rt zero_reg j.
- *p = nopInstr;
- *(p+1) = ORI | rt_code | (itarget & kImm16Mask);
- } else if (!(itarget & kImm16Mask)) {
- // nop.
- // lui rt (kHiMask & itarget) >> kLuiShift.
- *p = nopInstr;
- *(p+1) = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
- } else {
- // lui rt (kHiMask & itarget) >> kLuiShift.
- // ori rt rt, (kImm16Mask & itarget).
- *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
- *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
- }
+ // lui rt, high-16.
+ // ori rt rt, low-16.
+ *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
+ *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
CPU::FlushICache(pc, 2 * sizeof(int32_t));
}
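
The simplification above drops the nop/addiu/ori/lui special cases in favor of always emitting the two-instruction lui/ori pair, so the reader and the patcher agree on the layout. A minimal host-side sketch of the split and reassembly (plain C++, instruction encodings elided):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t itarget = 0xBFC00380;        // Example 32-bit target address.
  uint16_t hi = itarget >> 16;          // lui immediate (upper half).
  uint16_t lo = itarget & 0xFFFF;       // ori immediate (lower half).
  // target_address_at() reverses the split done by set_target_address_at():
  uint32_t reassembled = (static_cast<uint32_t>(hi) << 16) | lo;
  assert(reassembled == itarget);
  return 0;
}
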
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
#ifndef V8_MIPS_ASSEMBLER_MIPS_H_
// -----------------------------------------------------------------------------
-// Implementation of Register and FPURegister
+// Implementation of Register and FPURegister.
// Core register.
struct Register {
static const int kNumRegisters = v8::internal::kNumRegisters;
- static const int kNumAllocatableRegisters = 14; // v0 through t7
+ static const int kNumAllocatableRegisters = 14; // v0 through t7.
+ static const int kSizeInBytes = 4;
static int ToAllocationIndex(Register reg) {
return reg.code() - 2; // zero_reg and 'at' are skipped.
// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.
struct FPUControlRegister {
- static const int kFCSRRegister = 31;
- static const int kInvalidFPUControlRegister = -1;
-
bool is_valid() const { return code_ == kFCSRRegister; }
bool is(FPUControlRegister creg) const { return code_ == creg.code_; }
int code() const {
int code_;
};
-const FPUControlRegister no_fpucreg = { -1 };
+const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
const FPUControlRegister FCSR = { kFCSRRegister };
private:
Register rm_;
- int32_t imm32_; // Valid if rm_ == no_reg
+ int32_t imm32_; // Valid if rm_ == no_reg.
RelocInfo::Mode rmode_;
friend class Assembler;
// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a Scope before use.
-class CpuFeatures {
+class CpuFeatures : public AllStatic {
public:
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
- void Probe(bool portable);
+ static void Probe();
// Check whether a feature is supported by the target CPU.
- bool IsSupported(CpuFeature f) const {
+ static bool IsSupported(CpuFeature f) {
+ ASSERT(initialized_);
if (f == FPU && !FLAG_enable_fpu) return false;
return (supported_ & (1u << f)) != 0;
}
+
+#ifdef DEBUG
// Check whether a feature is currently enabled.
- bool IsEnabled(CpuFeature f) const {
- return (enabled_ & (1u << f)) != 0;
+ static bool IsEnabled(CpuFeature f) {
+ ASSERT(initialized_);
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ if (isolate == NULL) {
+ // When no isolate is available, work as if we're running in
+ // release mode.
+ return IsSupported(f);
+ }
+ unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
+ return (enabled & (1u << f)) != 0;
}
+#endif
// Enable a specified feature within a scope.
class Scope BASE_EMBEDDED {
#ifdef DEBUG
public:
- explicit Scope(CpuFeature f)
- : cpu_features_(Isolate::Current()->cpu_features()),
- isolate_(Isolate::Current()) {
- ASSERT(cpu_features_->IsSupported(f));
+ explicit Scope(CpuFeature f) {
+ unsigned mask = 1u << f;
+ ASSERT(CpuFeatures::IsSupported(f));
ASSERT(!Serializer::enabled() ||
- (cpu_features_->found_by_runtime_probing_ & (1u << f)) == 0);
- old_enabled_ = cpu_features_->enabled_;
- cpu_features_->enabled_ |= 1u << f;
+ (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
+ isolate_ = Isolate::UncheckedCurrent();
+ old_enabled_ = 0;
+ if (isolate_ != NULL) {
+ old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
+ isolate_->set_enabled_cpu_features(old_enabled_ | mask);
+ }
}
~Scope() {
- ASSERT_EQ(Isolate::Current(), isolate_);
- cpu_features_->enabled_ = old_enabled_;
- }
+ ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
+ if (isolate_ != NULL) {
+ isolate_->set_enabled_cpu_features(old_enabled_);
+ }
+ }
private:
- unsigned old_enabled_;
- CpuFeatures* cpu_features_;
Isolate* isolate_;
+ unsigned old_enabled_;
#else
public:
explicit Scope(CpuFeature f) {}
#endif
};
- private:
- CpuFeatures();
+ class TryForceFeatureScope BASE_EMBEDDED {
+ public:
+ explicit TryForceFeatureScope(CpuFeature f)
+ : old_supported_(CpuFeatures::supported_) {
+ if (CanForce()) {
+ CpuFeatures::supported_ |= (1u << f);
+ }
+ }
- unsigned supported_;
- unsigned enabled_;
- unsigned found_by_runtime_probing_;
+ ~TryForceFeatureScope() {
+ if (CanForce()) {
+ CpuFeatures::supported_ = old_supported_;
+ }
+ }
- friend class Isolate;
+ private:
+ static bool CanForce() {
+ // It's only safe to temporarily force support of CPU features
+ // when there's only a single isolate, which is guaranteed when
+ // the serializer is enabled.
+ return Serializer::enabled();
+ }
+
+ const unsigned old_supported_;
+ };
+
+ private:
+#ifdef DEBUG
+ static bool initialized_;
+#endif
+ static unsigned supported_;
+ static unsigned found_by_runtime_probing_;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
// for code generation and assumes its size to be buffer_size. If the buffer
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
- Assembler(void* buffer, int buffer_size);
+ Assembler(Isolate* isolate, void* buffer, int buffer_size);
~Assembler();
// Overrides the default provided by FLAG_debug_code.
//
// Note: The same Label can be used for forward and backward branches
// but it may be bound only once.
- void bind(Label* L); // binds an unbound label L to the current code position
+ void bind(Label* L); // Binds an unbound label L to current code position.
- // Returns the branch offset to the given label from the current code position
- // Links the label to the current position if it is still unbound
+ // Returns the branch offset to the given label from the current code
+ // position. Links the label to the current position if it is still unbound.
// Manages the jump elimination optimization if the second parameter is true.
int32_t branch_offset(Label* L, bool jump_elimination_allowed);
int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
};
- // type == 0 is the default non-marking type.
+ // Type == 0 is the default non-marking type.
void nop(unsigned int type = 0) {
ASSERT(type < 32);
sll(zero_reg, zero_reg, type, true);
}
- //------- Branch and jump instructions --------
+  // ------- Branch and jump instructions --------
// We don't use likely variant of instructions.
void b(int16_t offset);
void b(Label* L) { b(branch_offset(L, false)>>2); }
}
// Never use the int16_t b(l)cond version with a branch offset
- // instead of using the Label* version. See Twiki for infos.
+ // instead of using the Label* version.
// Jump targets must be in the current 256 MB-aligned region, i.e. 28 bits.
void j(int32_t target);
// Mark address of a debug break slot.
void RecordDebugBreakSlot();
+ // Record the AST id of the CallIC being compiled, so that it can be placed
+ // in the relocation information.
+ void RecordAstId(unsigned ast_id) { ast_id_for_reloc_info_ = ast_id; }
+
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
void RecordComment(const char* msg);
// Check if an instruction is a branch of some kind.
static bool IsBranch(Instr instr);
+ static bool IsBeq(Instr instr);
+ static bool IsBne(Instr instr);
static bool IsNop(Instr instr, unsigned int type);
static bool IsPop(Instr instr);
static bool IsLwRegFpNegOffset(Instr instr);
static bool IsSwRegFpNegOffset(Instr instr);
- static Register GetRt(Instr instr);
+ static Register GetRtReg(Instr instr);
+ static Register GetRsReg(Instr instr);
+ static Register GetRdReg(Instr instr);
+
+ static uint32_t GetRt(Instr instr);
+ static uint32_t GetRtField(Instr instr);
+ static uint32_t GetRs(Instr instr);
+ static uint32_t GetRsField(Instr instr);
+ static uint32_t GetRd(Instr instr);
+ static uint32_t GetRdField(Instr instr);
+ static uint32_t GetSa(Instr instr);
+ static uint32_t GetSaField(Instr instr);
+ static uint32_t GetOpcodeField(Instr instr);
+ static uint32_t GetImmediate16(Instr instr);
+ static uint32_t GetLabelConst(Instr instr);
static int32_t GetBranchOffset(Instr instr);
static bool IsLw(Instr instr);
static bool IsAddImmediate(Instr instr);
static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
+ static bool IsAndImmediate(Instr instr);
+
void CheckTrampolinePool(bool force_emit = false);
protected:
+ // Relocation for a type-recording IC has the AST id added to it. This
+ // member variable is a way to pass the information from the call site to
+ // the relocation info.
+ unsigned ast_id_for_reloc_info_;
+
bool emit_debug_code() const { return emit_debug_code_; }
int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
return trampoline_pool_blocked_nesting_ > 0;
}
+ bool has_exception() const {
+ return internal_trampoline_exception_;
+ }
+
private:
// Code buffer:
// The buffer into which code and relocation info are generated.
return end_;
}
int take_slot() {
- int trampoline_slot = next_slot_;
- ASSERT(free_slot_count_ > 0);
- free_slot_count_--;
- next_slot_ += 2 * kInstrSize;
+ int trampoline_slot = kInvalidSlotPos;
+ if (free_slot_count_ <= 0) {
+ // We have run out of space on trampolines.
+ // Make sure we fail in debug mode, so we become aware of each case
+ // when this happens.
+ ASSERT(0);
+ // Internal exception will be caught.
+ } else {
+ trampoline_slot = next_slot_;
+ free_slot_count_--;
+      next_slot_ += 2 * kInstrSize;
+ }
return trampoline_slot;
}
int take_label() {
static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
static const int kMaxDistBetweenPools =
kMaxBranchOffset - 2 * kTrampolineSize;
+ static const int kInvalidSlotPos = -1;
List<Trampoline> trampolines_;
+ bool internal_trampoline_exception_;
friend class RegExpMacroAssemblerMIPS;
friend class RelocInfo;
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
#if defined(V8_TARGET_ARCH_MIPS)
-#include "codegen-inl.h"
+#include "codegen.h"
#include "debug.h"
#include "deoptimizer.h"
#include "full-codegen.h"
#include "bootstrapper.h"
#include "code-stubs.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "regexp-macro-assembler.h"
namespace v8 {
}
-class FloatingPointHelper : public AllStatic {
- public:
-
- enum Destination {
- kFPURegisters,
- kCoreRegisters
- };
-
-
- // Loads smis from a0 and a1 (right and left in binary operations) into
- // floating point registers. Depending on the destination the values ends up
- // either f14 and f12 or in a2/a3 and a0/a1 respectively. If the destination
- // is floating point registers FPU must be supported. If core registers are
- // requested when FPU is supported f12 and f14 will be scratched.
- static void LoadSmis(MacroAssembler* masm,
- Destination destination,
- Register scratch1,
- Register scratch2);
-
- // Loads objects from a0 and a1 (right and left in binary operations) into
- // floating point registers. Depending on the destination the values ends up
- // either f14 and f12 or in a2/a3 and a0/a1 respectively. If the destination
- // is floating point registers FPU must be supported. If core registers are
- // requested when FPU is supported f12 and f14 will still be scratched. If
- // either a0 or a1 is not a number (not smi and not heap number object) the
- // not_number label is jumped to with a0 and a1 intact.
- static void LoadOperands(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number);
- // Loads the number from object into dst as a 32-bit integer if possible. If
- // the object is not a 32-bit integer control continues at the label
- // not_int32. If FPU is supported double_scratch is used but not scratch2.
- static void LoadNumberAsInteger(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- FPURegister double_scratch,
- Label* not_int32);
- private:
- static void LoadNumber(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register object,
- FPURegister dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number);
-};
-
-
void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
Register scratch1,
}
-void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- FPURegister double_scratch,
- Label* not_int32) {
+void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
+ Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ FPURegister double_scratch,
+ Label* not_number) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
+ Register int_scratch,
+ Destination destination,
+ FPURegister double_dst,
+ Register dst1,
+ Register dst2,
+ Register scratch2,
+ FPURegister single_scratch) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
+ Register object,
+ Destination destination,
+ FPURegister double_dst,
+ Register dst1,
+ Register dst2,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ FPURegister single_scratch,
+ Label* not_int32) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
+ Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ FPURegister double_scratch,
+ Label* not_int32) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
+ Register src1,
+ Register src2,
+ Register dst,
+ Register scratch,
+ Label* not_int32) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FloatingPointHelper::CallCCodeForDoubleOperation(
+ MacroAssembler* masm,
+ Token::Value op,
+ Register heap_number_result,
+ Register scratch) {
UNIMPLEMENTED_MIPS();
}
}
-// We fall into this code if the operands were Smis, but the result was
-// not (eg. overflow). We branch into this code (to the not_smi label) if
-// the operands were not both Smi. The operands are in lhs and rhs.
-// To call the C-implemented binary fp operation routines we need to end up
-// with the double precision floating point operands in a0 and a1 (for the
-// value in a1) and a2 and a3 (for the value in a0).
-void GenericBinaryOpStub::HandleBinaryOpSlowCases(MacroAssembler* masm,
- Label* not_smi,
- Register lhs,
- Register rhs,
- const Builtins::JavaScript& builtin) {
+Handle<Code> GetTypeRecordingUnaryOpStub(int key,
+ TRUnaryOpIC::TypeInfo type_info) {
+ TypeRecordingUnaryOpStub stub(key, type_info);
+ return stub.GetCode();
+}
+
+
+const char* TypeRecordingUnaryOpStub::GetName() {
UNIMPLEMENTED_MIPS();
+ return NULL;
}
-// For bitwise ops where the inputs are not both Smis we here try to determine
-// whether both inputs are either Smis or at least heap numbers that can be
-// represented by a 32 bit signed value. We truncate towards zero as required
-// by the ES spec. If this is the case we do the bitwise op and see if the
-// result is a Smi. If so, great, otherwise we try to find a heap number to
-// write the answer into (either by allocating or by overwriting).
-// On entry the operands are in lhs (x) and rhs (y). (Result = x op y).
-// On exit the result is in v0.
-void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
- Register lhs,
- Register rhs) {
+// TODO(svenpanne): Use virtual functions instead of switch.
+void TypeRecordingUnaryOpStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
+void TypeRecordingUnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+// TODO(svenpanne): Use virtual functions instead of switch.
+void TypeRecordingUnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
- GenericBinaryOpStub stub(key, type_info);
- return stub.GetCode();
+void TypeRecordingUnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
+ Label* non_smi,
+ Label* slow) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
+ Label* non_smi) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void TypeRecordingUnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateHeapNumberStubBitNot(
+ MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
+ Label* slow) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateHeapNumberCodeBitNot(
+ MacroAssembler* masm, Label* slow) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void TypeRecordingUnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateGenericCodeFallback(
+ MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
}
// requested the code falls through. If number allocation is requested but a
// heap number cannot be allocated the code jumps to the label gc_required.
void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
+ Label* use_runtime,
Label* gc_required,
SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
UNIMPLEMENTED_MIPS();
}
+void MathPowStub::Generate(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
bool CEntryStub::NeedsImmovableCode() {
return true;
}
}
-// StringCharCodeAtGenerator
-
+// StringCharCodeAtGenerator.
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register right,
Register left,
+ Register right,
Register scratch1,
Register scratch2,
Register scratch3,
}
+void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// TranscendentalCache runtime function.
class TranscendentalCacheStub: public CodeStub {
public:
- explicit TranscendentalCacheStub(TranscendentalCache::Type type)
- : type_(type) {}
+ enum ArgumentType {
+ TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
+ UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
+ };
+
+ TranscendentalCacheStub(TranscendentalCache::Type type,
+ ArgumentType argument_type)
+ : type_(type), argument_type_(argument_type) { }
void Generate(MacroAssembler* masm);
private:
TranscendentalCache::Type type_;
+ ArgumentType argument_type_;
+ void GenerateCallCFunction(MacroAssembler* masm, Register scratch);
+
Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_; }
+ int MinorKey() { return type_ | argument_type_; }
Runtime::FunctionId RuntimeFunction();
};
};
-class GenericBinaryOpStub : public CodeStub {
+class TypeRecordingUnaryOpStub: public CodeStub {
public:
- static const int kUnknownIntValue = -1;
-
- GenericBinaryOpStub(Token::Value op,
- OverwriteMode mode,
- Register lhs,
- Register rhs,
- int constant_rhs = kUnknownIntValue)
+ TypeRecordingUnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
: op_(op),
mode_(mode),
- lhs_(lhs),
- rhs_(rhs),
- constant_rhs_(constant_rhs),
- specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
- runtime_operands_type_(BinaryOpIC::UNINIT_OR_SMI),
- name_(NULL) { }
+ operand_type_(TRUnaryOpIC::UNINITIALIZED),
+ name_(NULL) {
+ }
- GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
+ TypeRecordingUnaryOpStub(
+ int key,
+ TRUnaryOpIC::TypeInfo operand_type)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
- lhs_(LhsRegister(RegisterBits::decode(key))),
- rhs_(RhsRegister(RegisterBits::decode(key))),
- constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
- specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
- runtime_operands_type_(type_info),
- name_(NULL) { }
+ operand_type_(operand_type),
+ name_(NULL) {
+ }
private:
Token::Value op_;
- OverwriteMode mode_;
- Register lhs_;
- Register rhs_;
- int constant_rhs_;
- bool specialized_on_rhs_;
- BinaryOpIC::TypeInfo runtime_operands_type_;
- char* name_;
-
- static const int kMaxKnownRhs = 0x40000000;
- static const int kKnownRhsKeyBits = 6;
+ UnaryOverwriteMode mode_;
- // Minor key encoding in 16 bits.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 6> {};
- class TypeInfoBits: public BitField<int, 8, 3> {};
- class RegisterBits: public BitField<bool, 11, 1> {};
- class KnownIntBits: public BitField<int, 12, kKnownRhsKeyBits> {};
+ // Operand type information determined at runtime.
+ TRUnaryOpIC::TypeInfo operand_type_;
- Major MajorKey() { return GenericBinaryOp; }
- int MinorKey() {
- ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
- (lhs_.is(a1) && rhs_.is(a0)));
- // Encode the parameters in a unique 16 bit value.
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | KnownIntBits::encode(MinorKeyForKnownInt())
- | TypeInfoBits::encode(runtime_operands_type_)
- | RegisterBits::encode(lhs_.is(a0));
- }
+ char* name_;
- void Generate(MacroAssembler* masm);
- void HandleNonSmiBitwiseOp(MacroAssembler* masm,
- Register lhs,
- Register rhs);
- void HandleBinaryOpSlowCases(MacroAssembler* masm,
- Label* not_smi,
- Register lhs,
- Register rhs,
- const Builtins::JavaScript& builtin);
- void GenerateTypeTransition(MacroAssembler* masm);
+ const char* GetName();
- static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
- if (constant_rhs == kUnknownIntValue) return false;
- if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
- if (op == Token::MOD) {
- if (constant_rhs <= 1) return false;
- if (constant_rhs <= 10) return true;
- if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
- return false;
- }
- return false;
+#ifdef DEBUG
+ void Print() {
+ PrintF("TypeRecordingUnaryOpStub %d (op %s), "
+ "(mode %d, runtime_type_info %s)\n",
+ MinorKey(),
+ Token::String(op_),
+ static_cast<int>(mode_),
+ TRUnaryOpIC::GetName(operand_type_));
}
+#endif
- int MinorKeyForKnownInt() {
- if (!specialized_on_rhs_) return 0;
- if (constant_rhs_ <= 10) return constant_rhs_ + 1;
- ASSERT(IsPowerOf2(constant_rhs_));
- int key = 12;
- int d = constant_rhs_;
- while ((d & 1) == 0) {
- key++;
- d >>= 1;
- }
- ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits));
- return key;
- }
+ class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
+ class OpBits: public BitField<Token::Value, 1, 7> {};
+ class OperandTypeInfoBits: public BitField<TRUnaryOpIC::TypeInfo, 8, 3> {};
- int KnownBitsForMinorKey(int key) {
- if (!key) return 0;
- if (key <= 11) return key - 1;
- int d = 1;
- while (key != 12) {
- key--;
- d <<= 1;
- }
- return d;
+ Major MajorKey() { return TypeRecordingUnaryOp; }
+ int MinorKey() {
+ return ModeBits::encode(mode_)
+ | OpBits::encode(op_)
+ | OperandTypeInfoBits::encode(operand_type_);
}
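
For orientation, a self-contained sketch of the BitField packing that MinorKey relies on: each field gets a disjoint shift/width, so encode() results can be ORed together and decoded independently (simplified stand-in, not V8's actual template):

#include <cassert>

// Simplified stand-in for V8's BitField<T, shift, size>.
template <class T, int shift, int size>
struct BitField {
  static unsigned encode(T value) {
    return static_cast<unsigned>(value) << shift;
  }
  static T decode(unsigned key) {
    return static_cast<T>((key >> shift) & ((1u << size) - 1));
  }
};

int main() {
  // Mirrors ModeBits (1 bit), OpBits (7 bits), OperandTypeInfoBits (3 bits).
  typedef BitField<int, 0, 1> ModeBits;
  typedef BitField<int, 1, 7> OpBits;
  typedef BitField<int, 8, 3> TypeInfoBits;
  unsigned key =
      ModeBits::encode(1) | OpBits::encode(42) | TypeInfoBits::encode(5);
  assert(ModeBits::decode(key) == 1);
  assert(OpBits::decode(key) == 42);
  assert(TypeInfoBits::decode(key) == 5);
  return 0;
}
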
- Register LhsRegister(bool lhs_is_a0) {
- return lhs_is_a0 ? a0 : a1;
- }
+ // Note: A lot of the helper functions below will vanish when we use virtual
+  // functions instead of switch more often.
+ void Generate(MacroAssembler* masm);
- Register RhsRegister(bool lhs_is_a0) {
- return lhs_is_a0 ? a1 : a0;
- }
+ void GenerateTypeTransition(MacroAssembler* masm);
- bool HasSmiSmiFastPath() {
- return op_ != Token::DIV;
- }
+ void GenerateSmiStub(MacroAssembler* masm);
+ void GenerateSmiStubSub(MacroAssembler* masm);
+ void GenerateSmiStubBitNot(MacroAssembler* masm);
+ void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow);
+ void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow);
- bool ShouldGenerateSmiCode() {
- return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
- runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
- runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
+ void GenerateHeapNumberStub(MacroAssembler* masm);
+ void GenerateHeapNumberStubSub(MacroAssembler* masm);
+ void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
+ void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
+ void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
- bool ShouldGenerateFPCode() {
- return runtime_operands_type_ != BinaryOpIC::STRINGS;
- }
+ void GenerateGenericStub(MacroAssembler* masm);
+ void GenerateGenericStubSub(MacroAssembler* masm);
+ void GenerateGenericStubBitNot(MacroAssembler* masm);
+ void GenerateGenericCodeFallback(MacroAssembler* masm);
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+ virtual int GetCodeKind() { return Code::TYPE_RECORDING_UNARY_OP_IC; }
virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(runtime_operands_type_);
+ return TRUnaryOpIC::ToState(operand_type_);
}
- const char* GetName();
-
virtual void FinishCode(Code* code) {
- code->set_binary_op_type(runtime_operands_type_);
- }
-
-#ifdef DEBUG
- void Print() {
- if (!specialized_on_rhs_) {
- PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
- } else {
- PrintF("GenericBinaryOpStub (%s by %d)\n",
- Token::String(op_),
- constant_rhs_);
- }
+ code->set_type_recording_unary_op_type(operand_type_);
}
-#endif
};
+
class TypeRecordingBinaryOpStub: public CodeStub {
public:
TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
operands_type_(TRBinaryOpIC::UNINITIALIZED),
result_type_(TRBinaryOpIC::UNINITIALIZED),
name_(NULL) {
- UNIMPLEMENTED_MIPS();
+ use_fpu_ = CpuFeatures::IsSupported(FPU);
+ ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
TypeRecordingBinaryOpStub(
Label* not_numbers,
Label* gc_required);
void GenerateSmiCode(MacroAssembler* masm,
+ Label* use_runtime,
Label* gc_required,
SmiCodeGenerateHeapNumberResults heapnumber_results);
void GenerateLoadArguments(MacroAssembler* masm);
void GenerateSmiStub(MacroAssembler* masm);
void GenerateInt32Stub(MacroAssembler* masm);
void GenerateHeapNumberStub(MacroAssembler* masm);
+ void GenerateOddballStub(MacroAssembler* masm);
void GenerateStringStub(MacroAssembler* masm);
+ void GenerateBothStringStub(MacroAssembler* masm);
void GenerateGenericStub(MacroAssembler* masm);
void GenerateAddStrings(MacroAssembler* masm);
void GenerateCallRuntime(MacroAssembler* masm);
// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
NO_STRING_ADD_FLAGS = 0,
- NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
+ // Omit left string check in stub (left is definitely a string).
+ NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
+ // Omit right string check in stub (right is definitely a string).
+ NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
+ // Omit both string checks in stub.
+ NO_STRING_CHECK_IN_STUB =
+ NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
};
class StringAddStub: public CodeStub {
public:
- explicit StringAddStub(StringAddFlags flags) {
- string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
- }
+ explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
private:
Major MajorKey() { return StringAdd; }
- int MinorKey() { return string_check_ ? 0 : 1; }
+ int MinorKey() { return flags_; }
void Generate(MacroAssembler* masm);
- // Should the stub check whether arguments are strings?
- bool string_check_;
+ void GenerateConvertArgument(MacroAssembler* masm,
+ int stack_offset,
+ Register arg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Label* slow);
+
+ const StringAddFlags flags_;
};
StringCompareStub() { }
// Compare two flat ASCII strings and returns result in v0.
- // Does not use the stack.
static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register left,
Register right,
Register scratch3,
Register scratch4);
- private:
- Major MajorKey() { return StringCompare; }
- int MinorKey() { return 0; }
+ // Compares two flat ASCII strings for equality and returns result
+ // in v0.
+ static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
- void Generate(MacroAssembler* masm);
+ private:
+ virtual Major MajorKey() { return StringCompare; }
+ virtual int MinorKey() { return 0; }
+ virtual void Generate(MacroAssembler* masm);
+
+ static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* chars_not_equal);
};
};
-// Generate code the to load an element from a pixel array. The receiver is
-// assumed to not be a smi and to have elements, the caller must guarantee this
-// precondition. If the receiver does not have elements that are pixel arrays,
-// the generated code jumps to not_pixel_array. If key is not a smi, then the
-// generated code branches to key_not_smi. Callers can specify NULL for
-// key_not_smi to signal that a smi check has already been performed on key so
-// that the smi check is not generated . If key is not a valid index within the
-// bounds of the pixel array, the generated code jumps to out_of_range.
-void GenerateFastPixelArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements_map,
- Register elements,
+class FloatingPointHelper : public AllStatic {
+ public:
+
+ enum Destination {
+ kFPURegisters,
+ kCoreRegisters
+ };
+
+
+ // Loads smis from a0 and a1 (right and left in binary operations) into
+  // floating point registers. Depending on the destination the values end up
+  // either in f14 and f12 or in a2/a3 and a0/a1, respectively. If the
+  // destination
+ // is floating point registers FPU must be supported. If core registers are
+ // requested when FPU is supported f12 and f14 will be scratched.
+ static void LoadSmis(MacroAssembler* masm,
+ Destination destination,
+ Register scratch1,
+ Register scratch2);
+
+ // Loads objects from a0 and a1 (right and left in binary operations) into
+ // floating point registers. Depending on the destination the values ends up
+ // either f14 and f12 or in a2/a3 and a0/a1 respectively. If the destination
+ // is floating point registers FPU must be supported. If core registers are
+ // requested when FPU is supported f12 and f14 will still be scratched. If
+ // either a0 or a1 is not a number (not smi and not heap number object) the
+ // not_number label is jumped to with a0 and a1 intact.
+ static void LoadOperands(MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* not_number);
+
+ // Convert the smi or heap number in object to an int32 using the rules
+ // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
+ // and brought into the range -2^31 .. +2^31 - 1.
+ static void ConvertNumberToInt32(MacroAssembler* masm,
+ Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ FPURegister double_scratch,
+ Label* not_int32);
+
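As a behavioral reference for the ECMAScript 9.5 ToInt32 rule cited above (truncate toward zero, then wrap modulo 2^32 into the signed range), here is a hedged host-side sketch; the stub itself performs this with generated MIPS code, not C++:

#include <cassert>
#include <cmath>
#include <cstdint>

static int32_t ToInt32(double value) {
  if (std::isnan(value) || std::isinf(value)) return 0;
  double wrapped = std::fmod(std::trunc(value), 4294967296.0);  // Mod 2^32.
  if (wrapped < 0) wrapped += 4294967296.0;
  // Reinterpret [0, 2^32) into the signed range [-2^31, 2^31 - 1].
  return static_cast<int32_t>(static_cast<uint32_t>(wrapped));
}

int main() {
  assert(ToInt32(4294967296.0 + 5.0) == 5);  // Wraps modulo 2^32.
  assert(ToInt32(-1.5) == -1);               // Truncates toward zero.
  return 0;
}
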
+ // Converts the integer (untagged smi) in |int_scratch| to a double, storing
+ // the result either in |double_dst| or |dst2:dst1|, depending on
+ // |destination|.
+ // Warning: The value in |int_scratch| will be changed in the process!
+ static void ConvertIntToDouble(MacroAssembler* masm,
+ Register int_scratch,
+ Destination destination,
+ FPURegister double_dst,
+ Register dst1,
+ Register dst2,
+ Register scratch2,
+ FPURegister single_scratch);
+
+ // Load the number from object into double_dst in the double format.
+ // Control will jump to not_int32 if the value cannot be exactly represented
+ // by a 32-bit integer.
+  // Floating point values in the 32-bit integer range that are not exact
+  // integers won't be loaded.
+ static void LoadNumberAsInt32Double(MacroAssembler* masm,
+ Register object,
+ Destination destination,
+ FPURegister double_dst,
+ Register dst1,
+ Register dst2,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ FPURegister single_scratch,
+ Label* not_int32);
+
+ // Loads the number from object into dst as a 32-bit integer.
+ // Control will jump to not_int32 if the object cannot be exactly represented
+ // by a 32-bit integer.
+  // Floating point values in the 32-bit integer range that are not exact
+  // integers won't be converted.
+ // scratch3 is not used when FPU is supported.
+ static void LoadNumberAsInt32(MacroAssembler* masm,
+ Register object,
+ Register dst,
+ Register heap_number_map,
Register scratch1,
Register scratch2,
- Register result,
- Label* not_pixel_array,
- Label* key_not_smi,
- Label* out_of_range);
+ Register scratch3,
+ FPURegister double_scratch,
+ Label* not_int32);
+
+  // Generate non-FPU code to check if a double can be exactly represented by a
+ // 32-bit integer. This does not check for 0 or -0, which need
+ // to be checked for separately.
+ // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
+ // through otherwise.
+  // src1 and src2 will be clobbered.
+ //
+ // Expected input:
+ // - src1: higher (exponent) part of the double value.
+ // - src2: lower (mantissa) part of the double value.
+ // Output status:
+  // - dst: the 32 high bits of the mantissa (mantissa[51:20]).
+ // - src2: contains 1.
+ // - other registers are clobbered.
+ static void DoubleIs32BitInteger(MacroAssembler* masm,
+ Register src1,
+ Register src2,
+ Register dst,
+ Register scratch,
+ Label* not_int32);
+
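A behavioral sketch of the exactness test DoubleIs32BitInteger performs: the real stub inspects the raw exponent/mantissa words (src1:src2) without FPU help, while this host-side equivalent just range-checks and round-trips the value (0/-0 handling left aside, as noted above):

#include <cassert>

static bool DoubleIs32BitInteger(double d) {
  // Range check first; this also rejects NaN, since both comparisons fail.
  if (!(d >= -2147483648.0 && d <= 2147483647.0)) return false;
  // Exact if truncating to int32 and widening back loses nothing.
  return static_cast<double>(static_cast<int32_t>(d)) == d;
}

int main() {
  assert(DoubleIs32BitInteger(42.0));
  assert(!DoubleIs32BitInteger(42.5));
  assert(!DoubleIs32BitInteger(4294967296.0));  // Out of int32 range.
  return 0;
}
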
+ // Generates code to call a C function to do a double operation using core
+ // registers. (Used when FPU is not supported.)
+ // This code never falls through, but returns with a heap number containing
+ // the result in v0.
+ // Register heapnumber_result must be a heap number in which the
+ // result of the operation will be stored.
+ // Requires the following layout on entry:
+ // a0: Left value (least significant part of mantissa).
+ // a1: Left value (sign, exponent, top of mantissa).
+ // a2: Right value (least significant part of mantissa).
+ // a3: Right value (sign, exponent, top of mantissa).
+ static void CallCCodeForDoubleOperation(MacroAssembler* masm,
+ Token::Value op,
+ Register heap_number_result,
+ Register scratch);
+
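Background for the a0-a3 layout listed above: with FPU unavailable, the o32 soft-float convention passes each double as two 32-bit words. A host-side sketch of the split (assuming a little-endian MIPS target, so the low mantissa word comes first):

#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  double left = 1.5;
  uint32_t words[2];
  std::memcpy(words, &left, sizeof(left));
  // Little-endian: words[0] is the low mantissa word (-> a0/a2);
  // words[1] carries sign, exponent, and top of mantissa (-> a1/a3).
  std::printf("low=%08" PRIx32 " high=%08" PRIx32 "\n", words[0], words[1]);
  return 0;
}
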
+ private:
+ static void LoadNumber(MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register object,
+ FPURegister dst,
+ Register dst1,
+ Register dst2,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* not_number);
+};
+
+
+class StringDictionaryLookupStub: public CodeStub {
+ public:
+ enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
+
+ explicit StringDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
+
+ void Generate(MacroAssembler* masm);
+
+ static void GenerateNegativeLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register receiver,
+ Register properties,
+ String* name,
+                                     Register scratch0);
+
+ static void GeneratePositiveLookup(MacroAssembler* masm,
+ Label* miss,
+ Label* done,
+ Register elements,
+ Register name,
+ Register r0,
+ Register r1);
+
+ private:
+ static const int kInlinedProbes = 4;
+ static const int kTotalProbes = 20;
+
+ static const int kCapacityOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kCapacityIndex * kPointerSize;
+
+ static const int kElementsStartOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("StringDictionaryLookupStub\n");
+ }
+#endif
+
+ Major MajorKey() { return StringDictionaryNegativeLookup; }
+
+ int MinorKey() {
+ return LookupModeBits::encode(mode_);
+ }
+
+ class LookupModeBits: public BitField<LookupMode, 0, 1> {};
+
+ LookupMode mode_;
+};
} } // namespace v8::internal
+++ /dev/null
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_MIPS_CODEGEN_MIPS_INL_H_
-#define V8_MIPS_CODEGEN_MIPS_INL_H_
-
-#include "virtual-frame-mips.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-// Platform-specific inline functions.
-
-void DeferredCode::Jump() {
- __ b(&entry_label_);
- __ nop();
-}
-
-
-// Note: this has been hacked for submisson. Mips branches require two
-// additional operands: Register src1, const Operand& src2.
-void DeferredCode::Branch(Condition cond) {
- __ Branch(&entry_label_, cond, zero_reg, Operand(0));
-}
-
-
-void Reference::GetValueAndSpill() {
- GetValue();
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_MIPS_CODEGEN_MIPS_INL_H_
-
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
#include "v8.h"
#if defined(V8_TARGET_ARCH_MIPS)
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "codegen-inl.h"
-#include "compiler.h"
-#include "debug.h"
-#include "ic-inl.h"
-#include "jsregexp.h"
-#include "jump-target-inl.h"
-#include "parser.h"
-#include "regexp-macro-assembler.h"
-#include "regexp-stack.h"
-#include "register-allocator-inl.h"
-#include "runtime.h"
-#include "scopes.h"
-#include "stub-cache.h"
-#include "virtual-frame-inl.h"
-#include "virtual-frame-mips-inl.h"
+#include "codegen.h"
namespace v8 {
namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-// -------------------------------------------------------------------------
-// Platform-specific DeferredCode functions.
-
-void DeferredCode::SaveRegisters() {
- // On MIPS you either have a completely spilled frame or you
- // handle it yourself, but at the moment there's no automation
- // of registers and deferred code.
-}
-
-
-void DeferredCode::RestoreRegisters() {
-}
-
-
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
-void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- frame_state_->frame()->AssertIsSpilled();
-}
-
-
-void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-}
-
-
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->EnterInternalFrame();
}
}
-// -----------------------------------------------------------------------------
-// CodeGenState implementation.
-
-CodeGenState::CodeGenState(CodeGenerator* owner)
- : owner_(owner),
- previous_(owner->state()) {
- owner->set_state(this);
-}
-
-
-ConditionCodeGenState::ConditionCodeGenState(CodeGenerator* owner,
- JumpTarget* true_target,
- JumpTarget* false_target)
- : CodeGenState(owner),
- true_target_(true_target),
- false_target_(false_target) {
- owner->set_state(this);
-}
-
-
-TypeInfoCodeGenState::TypeInfoCodeGenState(CodeGenerator* owner,
- Slot* slot,
- TypeInfo type_info)
- : CodeGenState(owner),
- slot_(slot) {
- owner->set_state(this);
- old_type_info_ = owner->set_type_info(slot, type_info);
-}
-
-
-CodeGenState::~CodeGenState() {
- ASSERT(owner_->state() == this);
- owner_->set_state(previous_);
-}
-
-
-TypeInfoCodeGenState::~TypeInfoCodeGenState() {
- owner()->set_type_info(slot_, old_type_info_);
-}
-
-
-// -----------------------------------------------------------------------------
-// CodeGenerator implementation.
-
-CodeGenerator::CodeGenerator(MacroAssembler* masm)
- : deferred_(8),
- masm_(masm),
- info_(NULL),
- frame_(NULL),
- allocator_(NULL),
- cc_reg_(cc_always),
- state_(NULL),
- loop_nesting_(0),
- type_info_(NULL),
- function_return_(JumpTarget::BIDIRECTIONAL),
- function_return_is_shadowed_(false) {
-}
-
-
-// Calling conventions:
-// fp: caller's frame pointer
-// sp: stack pointer
-// a1: called JS function
-// cp: callee's context
-
-void CodeGenerator::Generate(CompilationInfo* info) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-int CodeGenerator::NumberOfSlot(Slot* slot) {
- UNIMPLEMENTED_MIPS();
- return 0;
-}
-
-
-MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
- UNIMPLEMENTED_MIPS();
- return MemOperand(zero_reg, 0);
-}
-
-
-MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
- Slot* slot,
- Register tmp,
- Register tmp2,
- JumpTarget* slow) {
- UNIMPLEMENTED_MIPS();
- return MemOperand(zero_reg, 0);
-}
-
-
-void CodeGenerator::LoadCondition(Expression* x,
- JumpTarget* true_target,
- JumpTarget* false_target,
- bool force_cc) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::Load(Expression* x) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadGlobal() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadGlobalReceiver(Register scratch) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
- UNIMPLEMENTED_MIPS();
- return EAGER_ARGUMENTS_ALLOCATION;
-}
-
-
-void CodeGenerator::StoreArgumentsObject(bool initial) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadTypeofExpression(Expression* x) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-Reference::Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get)
- : cgen_(cgen),
- expression_(expression),
- type_(ILLEGAL),
- persist_after_get_(persist_after_get) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-Reference::~Reference() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadReference(Reference* ref) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::UnloadReference(Reference* ref) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
-// register to a boolean in the condition code register. The code
-// may jump to 'false_target' in case the register converts to 'false'.
-void CodeGenerator::ToBoolean(JumpTarget* true_target,
- JumpTarget* false_target) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenericBinaryOperation(Token::Value op,
- OverwriteMode overwrite_mode,
- GenerateInlineSmi inline_smi,
- int constant_rhs) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredInlineSmiOperation: public DeferredCode {
- public:
- DeferredInlineSmiOperation(Token::Value op,
- int value,
- bool reversed,
- OverwriteMode overwrite_mode,
- Register tos)
- : op_(op),
- value_(value),
- reversed_(reversed),
- overwrite_mode_(overwrite_mode),
- tos_register_(tos) {
- set_comment("[ DeferredInlinedSmiOperation");
- }
-
- virtual void Generate();
- // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and
- // Exit(). Currently on MIPS SaveRegisters() and RestoreRegisters() are empty
- // methods, it is the responsibility of the deferred code to save and restore
- // registers.
- virtual bool AutoSaveAndRestore() { return false; }
-
- void JumpToNonSmiInput(Condition cond, Register cmp1, const Operand& cmp2);
- void JumpToAnswerOutOfRange(Condition cond,
- Register cmp1,
- const Operand& cmp2);
-
- private:
- void GenerateNonSmiInput();
- void GenerateAnswerOutOfRange();
- void WriteNonSmiAnswer(Register answer,
- Register heap_number,
- Register scratch);
-
- Token::Value op_;
- int value_;
- bool reversed_;
- OverwriteMode overwrite_mode_;
- Register tos_register_;
- Label non_smi_input_;
- Label answer_out_of_range_;
-};
-
-
-// For bit operations we try harder and handle the case where the input is not
-// a Smi but a 32bits integer without calling the generic stub.
-void DeferredInlineSmiOperation::JumpToNonSmiInput(Condition cond,
- Register cmp1,
- const Operand& cmp2) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// For bit operations the result is always 32bits so we handle the case where
-// the result does not fit in a Smi without calling the generic stub.
-void DeferredInlineSmiOperation::JumpToAnswerOutOfRange(Condition cond,
- Register cmp1,
- const Operand& cmp2) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// On entry the non-constant side of the binary operation is in tos_register_
-// and the constant smi side is nowhere. The tos_register_ is not used by the
-// virtual frame. On exit the answer is in the tos_register_ and the virtual
-// frame is unchanged.
-void DeferredInlineSmiOperation::Generate() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// Convert and write the integer answer into heap_number.
-void DeferredInlineSmiOperation::WriteNonSmiAnswer(Register answer,
- Register heap_number,
- Register scratch) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void DeferredInlineSmiOperation::GenerateNonSmiInput() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void DeferredInlineSmiOperation::GenerateAnswerOutOfRange() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::SmiOperation(Token::Value op,
- Handle<Object> value,
- bool reversed,
- OverwriteMode mode) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// On MIPS we load registers condReg1 and condReg2 with the values which should
-// be compared. With the CodeGenerator::cc_reg_ condition, functions will be
-// able to evaluate correctly the condition. (eg CodeGenerator::Branch)
-void CodeGenerator::Comparison(Condition cc,
- Expression* left,
- Expression* right,
- bool strict) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
- CallFunctionFlags flags,
- int position) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::CallApplyLazy(Expression* applicand,
- Expression* receiver,
- VariableProxy* arguments,
- int position) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::CheckStack() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitBlock(Block* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitDeclaration(Declaration* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitIfStatement(IfStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateReturnSequence() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitForStatement(ForStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitForInStatement(ForInStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::InstantiateFunction(
- Handle<SharedFunctionInfo> function_info,
- bool pretenure) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitConditional(Conditional* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
- TypeofState state) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow,
- JumpTarget* done) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitSlot(Slot* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitLiteral(Literal* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitSlotAssignment(Assignment* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitAssignment(Assignment* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitThrow(Throw* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitProperty(Property* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCall(Call* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCallNew(CallNew* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredStringCharCodeAt : public DeferredCode {
- public:
- DeferredStringCharCodeAt(Register object,
- Register index,
- Register scratch,
- Register result)
- : result_(result),
- char_code_at_generator_(object,
- index,
- scratch,
- result,
- &need_conversion_,
- &need_conversion_,
- &index_out_of_range_,
- STRING_INDEX_IS_NUMBER) {}
-
- StringCharCodeAtGenerator* fast_case_generator() {
- return &char_code_at_generator_;
- }
-
- virtual void Generate() {
- UNIMPLEMENTED_MIPS();
- }
-
- private:
- Register result_;
-
- Label need_conversion_;
- Label index_out_of_range_;
-
- StringCharCodeAtGenerator char_code_at_generator_;
-};
-
-
-void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredStringCharFromCode : public DeferredCode {
- public:
- DeferredStringCharFromCode(Register code,
- Register result)
- : char_from_code_generator_(code, result) {}
-
- StringCharFromCodeGenerator* fast_case_generator() {
- return &char_from_code_generator_;
- }
-
- virtual void Generate() {
- VirtualFrameRuntimeCallHelper call_helper(frame_state());
- char_from_code_generator_.GenerateSlow(masm(), call_helper);
- }
-
- private:
- StringCharFromCodeGenerator char_from_code_generator_;
-};
-
-
-void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredStringCharAt : public DeferredCode {
- public:
- DeferredStringCharAt(Register object,
- Register index,
- Register scratch1,
- Register scratch2,
- Register result)
- : result_(result),
- char_at_generator_(object,
- index,
- scratch1,
- scratch2,
- result,
- &need_conversion_,
- &need_conversion_,
- &index_out_of_range_,
- STRING_INDEX_IS_NUMBER) {}
-
- StringCharAtGenerator* fast_case_generator() {
- return &char_at_generator_;
- }
-
- virtual void Generate() {
- UNIMPLEMENTED_MIPS();
-}
-
- private:
- Register result_;
-
- Label need_conversion_;
- Label index_out_of_range_;
-
- StringCharAtGenerator char_at_generator_;
-};
-
-
-void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
- public:
- DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
- Register map_result,
- Register scratch1,
- Register scratch2)
- : object_(object),
- map_result_(map_result),
- scratch1_(scratch1),
- scratch2_(scratch2) { }
-
- virtual void Generate() {
- UNIMPLEMENTED_MIPS();
- }
-
- private:
- Register object_;
- Register map_result_;
- Register scratch1_;
- Register scratch2_;
-};
-
-
-void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateRandomHeapNumber(
- ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredSearchCache: public DeferredCode {
- public:
- DeferredSearchCache(Register dst, Register cache, Register key)
- : dst_(dst), cache_(cache), key_(key) {
- set_comment("[ DeferredSearchCache");
- }
-
- virtual void Generate();
-
- private:
- Register dst_, cache_, key_;
-};
-
-
-void DeferredSearchCache::Generate() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredSwapElements: public DeferredCode {
- public:
- DeferredSwapElements(Register object, Register index1, Register index2)
- : object_(object), index1_(index1), index2_(index2) {
- set_comment("[ DeferredSwapElements");
- }
-
- virtual void Generate();
-
- private:
- Register object_, index1_, index2_;
-};
-
-
-void DeferredSwapElements::Generate() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredCountOperation: public DeferredCode {
- public:
- DeferredCountOperation(Register value,
- bool is_increment,
- bool is_postfix,
- int target_size)
- : value_(value),
- is_increment_(is_increment),
- is_postfix_(is_postfix),
- target_size_(target_size) {}
-
- virtual void Generate() {
- UNIMPLEMENTED_MIPS();
- }
-
- private:
- Register value_;
- bool is_increment_;
- bool is_postfix_;
- int target_size_;
-};
-
-
-void CodeGenerator::VisitCountOperation(CountOperation* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitThisFunction(ThisFunction* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredReferenceGetNamedValue: public DeferredCode {
- public:
- explicit DeferredReferenceGetNamedValue(Register receiver,
- Handle<String> name,
- bool is_contextual)
- : receiver_(receiver),
- name_(name),
- is_contextual_(is_contextual),
- is_dont_delete_(false) {
- set_comment(is_contextual
- ? "[ DeferredReferenceGetNamedValue (contextual)"
- : "[ DeferredReferenceGetNamedValue");
- }
-
- virtual void Generate();
-
- void set_is_dont_delete(bool value) {
- ASSERT(is_contextual_);
- is_dont_delete_ = value;
- }
-
- private:
- Register receiver_;
- Handle<String> name_;
- bool is_contextual_;
- bool is_dont_delete_;
-};
-
-
-
-void DeferredReferenceGetNamedValue::Generate() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredReferenceGetKeyedValue: public DeferredCode {
- public:
- DeferredReferenceGetKeyedValue(Register key, Register receiver)
- : key_(key), receiver_(receiver) {
- set_comment("[ DeferredReferenceGetKeyedValue");
- }
-
- virtual void Generate();
-
- private:
- Register key_;
- Register receiver_;
-};
-
-
-void DeferredReferenceGetKeyedValue::Generate() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredReferenceSetKeyedValue: public DeferredCode {
- public:
- DeferredReferenceSetKeyedValue(Register value,
- Register key,
- Register receiver)
- : value_(value), key_(key), receiver_(receiver) {
- set_comment("[ DeferredReferenceSetKeyedValue");
- }
-
- virtual void Generate();
-
- private:
- Register value_;
- Register key_;
- Register receiver_;
-};
-
-
-void DeferredReferenceSetKeyedValue::Generate() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-class DeferredReferenceSetNamedValue: public DeferredCode {
- public:
- DeferredReferenceSetNamedValue(Register value,
- Register receiver,
- Handle<String> name)
- : value_(value), receiver_(receiver), name_(name) {
- set_comment("[ DeferredReferenceSetNamedValue");
- }
-
- virtual void Generate();
-
- private:
- Register value_;
- Register receiver_;
- Handle<String> name_;
-};
-
-
-void DeferredReferenceSetNamedValue::Generate() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitKeyedLoad() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::EmitKeyedStore(StaticType* key_type,
- WriteBarrierCharacter wb_info) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-#ifdef DEBUG
-bool CodeGenerator::HasValidEntryRegisters() {
- UNIMPLEMENTED_MIPS();
- return false;
-}
-#endif
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-// -----------------------------------------------------------------------------
-// Reference support.
-
-
-Handle<String> Reference::GetName() {
- UNIMPLEMENTED_MIPS();
- return Handle<String>();
-}
-
-
-void Reference::DupIfPersist() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Reference::GetValue() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Reference::SetValue(InitState init_state, WriteBarrierCharacter wb_info) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-const char* GenericBinaryOpStub::GetName() {
- UNIMPLEMENTED_MIPS();
- return name_;
-}
-
-
-#undef __
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// Forward declarations
class CompilationInfo;
-class DeferredCode;
-class JumpTarget;
-class RegisterAllocator;
-class RegisterFile;
-enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-enum GenerateInlineSmi { DONT_GENERATE_INLINE_SMI, GENERATE_INLINE_SMI };
-enum WriteBarrierCharacter { UNLIKELY_SMI, LIKELY_SMI, NEVER_NEWSPACE };
-
-
-// -----------------------------------------------------------------------------
-// Reference support
-
-// A reference is a C++ stack-allocated object that keeps an ECMA
-// reference on the execution stack while in scope. For variables
-// the reference is empty, indicating that it isn't necessary to
-// store state on the stack for keeping track of references to those.
-// For properties, we keep either one (named) or two (indexed) values
-// on the execution stack to represent the reference.
-class Reference BASE_EMBEDDED {
- public:
- // The values of the types is important, see size().
- enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
- Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get = false);
- ~Reference();
-
- Expression* expression() const { return expression_; }
- Type type() const { return type_; }
- void set_type(Type value) {
- ASSERT_EQ(ILLEGAL, type_);
- type_ = value;
- }
-
- void set_unloaded() {
- ASSERT_NE(ILLEGAL, type_);
- ASSERT_NE(UNLOADED, type_);
- type_ = UNLOADED;
- }
- // The size the reference takes up on the stack.
- int size() const {
- return (type_ < SLOT) ? 0 : type_;
- }
-
- bool is_illegal() const { return type_ == ILLEGAL; }
- bool is_slot() const { return type_ == SLOT; }
- bool is_property() const { return type_ == NAMED || type_ == KEYED; }
- bool is_unloaded() const { return type_ == UNLOADED; }
-
- // Return the name. Only valid for named property references.
- Handle<String> GetName();
-
- // Generate code to push the value of the reference on top of the
- // expression stack. The reference is expected to be already on top of
- // the expression stack, and it is consumed by the call unless the
- // reference is for a compound assignment.
- // If the reference is not consumed, it is left in place under its value.
- void GetValue();
-
- // Generate code to pop a reference, push the value of the reference,
- // and then spill the stack frame.
- inline void GetValueAndSpill();
-
- // Generate code to store the value on top of the expression stack in the
- // reference. The reference is expected to be immediately below the value
- // on the expression stack. The value is stored in the location specified
- // by the reference, and is left on top of the stack, after the reference
- // is popped from beneath it (unloaded).
- void SetValue(InitState init_state, WriteBarrierCharacter wb);
-
- // This is in preparation for something that uses the reference on the stack.
- // If we need this reference afterwards get then dup it now. Otherwise mark
- // it as used.
- inline void DupIfPersist();
-
- private:
- CodeGenerator* cgen_;
- Expression* expression_;
- Type type_;
- // Keep the reference on the stack after get, so it can be used by set later.
- bool persist_after_get_;
-};
-
-
-// -----------------------------------------------------------------------------
-// Code generation state
-
-// The state is passed down the AST by the code generator (and back up, in
-// the form of the state of the label pair). It is threaded through the
-// call stack. Constructing a state implicitly pushes it on the owning code
-// generator's stack of states, and destroying one implicitly pops it.
-
-class CodeGenState BASE_EMBEDDED {
- public:
- // Create an initial code generator state. Destroying the initial state
- // leaves the code generator with a NULL state.
- explicit CodeGenState(CodeGenerator* owner);
-
-
-
- // Destroy a code generator state and restore the owning code generator's
- // previous state.
- virtual ~CodeGenState();
-
- virtual JumpTarget* true_target() const { return NULL; }
- virtual JumpTarget* false_target() const { return NULL; }
-
- protected:
- inline CodeGenerator* owner() { return owner_; }
- inline CodeGenState* previous() const { return previous_; }
-
- private:
- // The owning code generator.
- CodeGenerator* owner_;
-
-
-
- // The previous state of the owning code generator, restored when
- // this state is destroyed.
- CodeGenState* previous_;
-};
-
-
-class ConditionCodeGenState : public CodeGenState {
- public:
- // Create a code generator state based on a code generator's current
- // state. The new state has its own pair of branch labels.
- ConditionCodeGenState(CodeGenerator* owner,
- JumpTarget* true_target,
- JumpTarget* false_target);
-
- virtual JumpTarget* true_target() const { return true_target_; }
- virtual JumpTarget* false_target() const { return false_target_; }
-
- private:
- JumpTarget* true_target_;
- JumpTarget* false_target_;
-};
-
-
-class TypeInfoCodeGenState : public CodeGenState {
- public:
- TypeInfoCodeGenState(CodeGenerator* owner,
- Slot* slot_number,
- TypeInfo info);
- virtual ~TypeInfoCodeGenState();
-
- virtual JumpTarget* true_target() const { return previous()->true_target(); }
- virtual JumpTarget* false_target() const {
- return previous()->false_target();
- }
-
- private:
- Slot* slot_;
- TypeInfo old_type_info_;
-};
-
// -------------------------------------------------------------------------
-// Arguments allocation mode
-
-enum ArgumentsAllocationMode {
- NO_ARGUMENTS_ALLOCATION,
- EAGER_ARGUMENTS_ALLOCATION,
- LAZY_ARGUMENTS_ALLOCATION
-};
-
-
-// -----------------------------------------------------------------------------
// CodeGenerator
class CodeGenerator: public AstVisitor {
public:
- // Compilation mode. Either the compiler is used as the primary
- // compiler and needs to setup everything or the compiler is used as
- // the secondary compiler for split compilation and has to handle
- // bailouts.
- enum Mode {
- PRIMARY,
- SECONDARY
- };
-
static bool MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
int pos,
bool right_here = false);
- // Accessors
- MacroAssembler* masm() { return masm_; }
- VirtualFrame* frame() const { return frame_; }
- inline Handle<Script> script();
-
- bool has_valid_frame() const { return frame_ != NULL; }
-
- // Set the virtual frame to be new_frame, with non-frame register
- // reference counts given by non_frame_registers. The non-frame
- // register reference counts of the old frame are returned in
- // non_frame_registers.
- void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
-
- void DeleteFrame();
-
- RegisterAllocator* allocator() const { return allocator_; }
-
- CodeGenState* state() { return state_; }
- void set_state(CodeGenState* state) { state_ = state; }
-
- TypeInfo type_info(Slot* slot) {
- int index = NumberOfSlot(slot);
- if (index == kInvalidSlotNumber) return TypeInfo::Unknown();
- return (*type_info_)[index];
- }
-
- TypeInfo set_type_info(Slot* slot, TypeInfo info) {
- int index = NumberOfSlot(slot);
- ASSERT(index >= kInvalidSlotNumber);
- if (index != kInvalidSlotNumber) {
- TypeInfo previous_value = (*type_info_)[index];
- (*type_info_)[index] = info;
- return previous_value;
- }
- return TypeInfo::Unknown();
- }
- void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
-
// Constants related to patching of inlined load/store.
static int GetInlinedKeyedLoadInstructionsAfterPatch() {
// This is in correlation with the padding in MacroAssembler::Abort.
return FLAG_debug_code ? 45 : 20;
}
- static const int kInlinedKeyedStoreInstructionsAfterPatch = 9;
+
+ static const int kInlinedKeyedStoreInstructionsAfterPatch = 13;
+
static int GetInlinedNamedStoreInstructionsAfterPatch() {
ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
// Magic number 5: instruction count after patched map load:
}
private:
- // Type of a member function that generates inline code for a native function.
- typedef void (CodeGenerator::*InlineFunctionGenerator)
- (ZoneList<Expression*>*);
-
- static const InlineFunctionGenerator kInlineFunctionGenerators[];
-
-
- // Construction/Destruction.
- explicit CodeGenerator(MacroAssembler* masm);
-
- // Accessors.
- inline bool is_eval();
- inline Scope* scope();
- inline bool is_strict_mode();
- inline StrictModeFlag strict_mode_flag();
-
- // Generating deferred code.
- void ProcessDeferred();
-
- static const int kInvalidSlotNumber = -1;
-
- int NumberOfSlot(Slot* slot);
- // State
- bool has_cc() const { return cc_reg_ != cc_always; }
-
- JumpTarget* true_target() const { return state_->true_target(); }
- JumpTarget* false_target() const { return state_->false_target(); }
-
- // Track loop nesting level.
- int loop_nesting() const { return loop_nesting_; }
- void IncrementLoopNesting() { loop_nesting_++; }
- void DecrementLoopNesting() { loop_nesting_--; }
-
- // Node visitors.
- void VisitStatements(ZoneList<Statement*>* statements);
-
- virtual void VisitSlot(Slot* node);
-#define DEF_VISIT(type) \
- virtual void Visit##type(type* node);
- AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
- // Main code generation function
- void Generate(CompilationInfo* info);
-
- // Generate the return sequence code. Should be called no more than
- // once per compiled function, immediately after binding the return
- // target (which can not be done more than once). The return value should
- // be in v0.
- void GenerateReturnSequence();
-
- // Returns the arguments allocation mode.
- ArgumentsAllocationMode ArgumentsMode();
-
- // Store the arguments object and allocate it if necessary.
- void StoreArgumentsObject(bool initial);
-
- // The following are used by class Reference.
- void LoadReference(Reference* ref);
- void UnloadReference(Reference* ref);
-
- MemOperand SlotOperand(Slot* slot, Register tmp);
-
- MemOperand ContextSlotOperandCheckExtensions(Slot* slot,
- Register tmp,
- Register tmp2,
- JumpTarget* slow);
-
- void LoadCondition(Expression* x,
- JumpTarget* true_target,
- JumpTarget* false_target,
- bool force_cc);
- void Load(Expression* x);
- void LoadGlobal();
- void LoadGlobalReceiver(Register scratch);
-
-
- // Special code for typeof expressions: Unfortunately, we must
- // be careful when loading the expression in 'typeof'
- // expressions. We are not allowed to throw reference errors for
- // non-existing properties of the global object, so we must make it
- // look like an explicit property access, instead of an access
- // through the context chain.
- void LoadTypeofExpression(Expression* x);
-
- // Store a keyed property. Key and receiver are on the stack and the value is
- // in a0. Result is returned in r0.
- void EmitKeyedStore(StaticType* key_type, WriteBarrierCharacter wb_info);
-
- // Read a value from a slot and leave it on top of the expression stack.
- void LoadFromSlot(Slot* slot, TypeofState typeof_state);
- void LoadFromGlobalSlotCheckExtensions(Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow);
- void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
-
- // Support for loading from local/global variables and arguments
- // whose location is known unless they are shadowed by
- // eval-introduced bindings. Generates no code for unsupported slot
- // types and therefore expects to fall through to the slow jump target.
- void EmitDynamicLoadFromSlotFastCase(Slot* slot,
- TypeofState typeof_state,
- JumpTarget* slow,
- JumpTarget* done);
-
- // Store the value on top of the stack to a slot.
- void StoreToSlot(Slot* slot, InitState init_state);
-
- // Support for compiling assignment expressions.
- void EmitSlotAssignment(Assignment* node);
- void EmitNamedPropertyAssignment(Assignment* node);
- void EmitKeyedPropertyAssignment(Assignment* node);
-
- // Load a named property, returning it in v0. The receiver is passed on the
- // stack, and remains there.
- void EmitNamedLoad(Handle<String> name, bool is_contextual);
-
- // Store to a named property. If the store is contextual, value is passed on
- // the frame and consumed. Otherwise, receiver and value are passed on the
- // frame and consumed. The result is returned in v0.
- void EmitNamedStore(Handle<String> name, bool is_contextual);
-
- // Load a keyed property, leaving it in v0. The receiver and key are
- // passed on the stack, and remain there.
- void EmitKeyedLoad();
-
- void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
-
- // Generate code that computes a shortcutting logical operation.
- void GenerateLogicalBooleanOperation(BinaryOperation* node);
-
- void GenericBinaryOperation(Token::Value op,
- OverwriteMode overwrite_mode,
- GenerateInlineSmi inline_smi,
- int known_rhs =
- GenericBinaryOpStub::kUnknownIntValue);
-
- void VirtualFrameBinaryOperation(Token::Value op,
- OverwriteMode overwrite_mode,
- int known_rhs =
- GenericBinaryOpStub::kUnknownIntValue);
-
- void SmiOperation(Token::Value op,
- Handle<Object> value,
- bool reversed,
- OverwriteMode mode);
-
- void Comparison(Condition cc,
- Expression* left,
- Expression* right,
- bool strict = false);
-
- void CallWithArguments(ZoneList<Expression*>* arguments,
- CallFunctionFlags flags,
- int position);
-
- // An optimized implementation of expressions of the form
- // x.apply(y, arguments). We call x the applicand and y the receiver.
- // The optimization avoids allocating an arguments object if possible.
- void CallApplyLazy(Expression* applicand,
- Expression* receiver,
- VariableProxy* arguments,
- int position);
-
- // Control flow
- void Branch(bool if_true, JumpTarget* target);
- void CheckStack();
-
- bool CheckForInlineRuntimeCall(CallRuntime* node);
-
- static Handle<Code> ComputeLazyCompile(int argc);
- void ProcessDeclarations(ZoneList<Declaration*>* declarations);
-
- // Declare global variables and functions in the given array of
- // name/value pairs.
- void DeclareGlobals(Handle<FixedArray> pairs);
-
- // Instantiate the function based on the shared function info.
- void InstantiateFunction(Handle<SharedFunctionInfo> function_info,
- bool pretenure);
-
- // Support for type checks.
- void GenerateIsSmi(ZoneList<Expression*>* args);
- void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
- void GenerateIsArray(ZoneList<Expression*>* args);
- void GenerateIsRegExp(ZoneList<Expression*>* args);
-
- // Support for construct call checks.
- void GenerateIsConstructCall(ZoneList<Expression*>* args);
-
- // Support for arguments.length and arguments[?].
- void GenerateArgumentsLength(ZoneList<Expression*>* args);
- void GenerateArguments(ZoneList<Expression*>* args);
-
- // Support for accessing the class and value fields of an object.
- void GenerateClassOf(ZoneList<Expression*>* args);
- void GenerateValueOf(ZoneList<Expression*>* args);
- void GenerateSetValueOf(ZoneList<Expression*>* args);
-
- // Fast support for charCodeAt(n).
- void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
-
- // Fast support for string.charAt(n) and string[n].
- void GenerateStringCharFromCode(ZoneList<Expression*>* args);
-
- // Fast support for string.charAt(n) and string[n].
- void GenerateStringCharAt(ZoneList<Expression*>* args);
-
- // Fast support for object equality testing.
- void GenerateObjectEquals(ZoneList<Expression*>* args);
-
- void GenerateLog(ZoneList<Expression*>* args);
-
- // Fast support for Math.random().
- void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
-
- void GenerateIsObject(ZoneList<Expression*>* args);
- void GenerateIsSpecObject(ZoneList<Expression*>* args);
- void GenerateIsFunction(ZoneList<Expression*>* args);
- void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
- void GenerateStringAdd(ZoneList<Expression*>* args);
- void GenerateSubString(ZoneList<Expression*>* args);
- void GenerateStringCompare(ZoneList<Expression*>* args);
- void GenerateIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* args);
-
- // Support for direct calls from JavaScript to native RegExp code.
- void GenerateRegExpExec(ZoneList<Expression*>* args);
-
- void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
-
- // Support for fast native caches.
- void GenerateGetFromCache(ZoneList<Expression*>* args);
-
- // Fast support for number to string.
- void GenerateNumberToString(ZoneList<Expression*>* args);
-
- // Fast swapping of elements.
- void GenerateSwapElements(ZoneList<Expression*>* args);
-
- // Fast call for custom callbacks.
- void GenerateCallFunction(ZoneList<Expression*>* args);
-
- // Fast call to math functions.
- void GenerateMathPow(ZoneList<Expression*>* args);
- void GenerateMathSin(ZoneList<Expression*>* args);
- void GenerateMathCos(ZoneList<Expression*>* args);
- void GenerateMathSqrt(ZoneList<Expression*>* args);
- void GenerateMathLog(ZoneList<Expression*>* args);
-
- void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
-
- void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
- void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
- void GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args);
-
- // Simple condition analysis.
- enum ConditionAnalysis {
- ALWAYS_TRUE,
- ALWAYS_FALSE,
- DONT_KNOW
- };
- ConditionAnalysis AnalyzeCondition(Expression* cond);
-
- // Methods used to indicate which source code is generated for. Source
- // positions are collected by the assembler and emitted with the relocation
- // information.
- void CodeForFunctionPosition(FunctionLiteral* fun);
- void CodeForReturnPosition(FunctionLiteral* fun);
- void CodeForStatementPosition(Statement* node);
- void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
- void CodeForSourcePosition(int pos);
-
-#ifdef DEBUG
- // True if the registers are valid for entry to a block.
- bool HasValidEntryRegisters();
-#endif
-
- List<DeferredCode*> deferred_;
-
- // Assembler
- MacroAssembler* masm_; // to generate code
-
- CompilationInfo* info_;
-
- // Code generation state
- VirtualFrame* frame_;
- RegisterAllocator* allocator_;
- Condition cc_reg_;
- CodeGenState* state_;
- int loop_nesting_;
-
- Vector<TypeInfo>* type_info_;
- // Jump targets
- BreakTarget function_return_;
-
- // True if the function return is shadowed (ie, jumping to the target
- // function_return_ does not jump to the true function return, but rather
- // to some unlinking code).
- bool function_return_is_shadowed_;
-
- friend class VirtualFrame;
- friend class Isolate;
- friend class JumpTarget;
- friend class Reference;
- friend class FastCodeGenerator;
- friend class FullCodeGenerator;
- friend class FullCodeGenSyntaxChecker;
- friend class InlineRuntimeFunctionsTable;
- friend class LCodeGen;
-
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// -----------------------------------------------------------------------------
-// Registers
+// Registers.
// These register names are defined in a way to match the native disassembler
// -----------------------------------------------------------------------------
-// Instruction
+// Instructions.
bool Instruction::IsForbiddenInBranchDelay() const {
const int op = OpcodeFieldRaw();
UNREACHABLE();
};
break;
- case COP1: // Coprocessor instructions
+ case COP1: // Coprocessor instructions.
switch (RsFieldRawNoAssert()) {
- case BC1: // branch on coprocessor condition
+ case BC1: // Branch on coprocessor condition.
return kImmediateType;
default:
return kRegisterType;
};
break;
- // 16 bits Immediate type instructions. eg: addi dest, src, imm16
+ // 16 bits Immediate type instructions. eg: addi dest, src, imm16.
case REGIMM:
case BEQ:
case BNE:
case SWC1:
case SDC1:
return kImmediateType;
- // 26 bits immediate type instructions. eg: j imm26
+ // 26 bits immediate type instructions. eg: j imm26.
case J:
case JAL:
return kJumpType;
namespace internal {
// -----------------------------------------------------------------------------
-// Registers and FPURegister.
+// Registers and FPURegisters.
// Number of general purpose registers.
static const int kNumRegisters = 32;
// FCSR constants.
static const uint32_t kFCSRFlagMask = (1 << 6) - 1;
static const uint32_t kFCSRFlagShift = 2;
+static const uint32_t kFCSRInexactFlagBit = 1 << 0;
+static const uint32_t kFCSRUnderflowFlagBit = 1 << 1;
+static const uint32_t kFCSROverflowFlagBit = 1 << 2;
+static const uint32_t kFCSRDivideByZeroFlagBit = 1 << 3;
+static const uint32_t kFCSRInvalidOpFlagBit = 1 << 4;
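+
+// A minimal usage sketch (hypothetical variable names, not part of this
+// header): the flag field is extracted by shift and mask, and the per-flag
+// bits above then index into the extracted field:
+//   uint32_t flags = (fcsr >> kFCSRFlagShift) & kFCSRFlagMask;
+//   bool overflowed = (flags & kFCSROverflowFlagBit) != 0;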
// Helper functions for converting between register numbers and names.
class Registers {
// On MIPS all instructions are 32 bits.
typedef int32_t Instr;
-typedef unsigned char byte_;
-
// Special Software Interrupt codes when used in the presence of the MIPS
// simulator.
enum SoftwareInterruptCodes {
static const int kFBtrueShift = 16;
static const int kFBtrueBits = 1;
-// ----- Miscellianous useful masks.
+// ----- Miscellaneous useful masks.
// Instruction bit masks.
static const int kOpcodeMask = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
static const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift;
XORI = ((1 << 3) + 6) << kOpcodeShift,
LUI = ((1 << 3) + 7) << kOpcodeShift,
- COP1 = ((2 << 3) + 1) << kOpcodeShift, // Coprocessor 1 class
+ COP1 = ((2 << 3) + 1) << kOpcodeShift, // Coprocessor 1 class.
BEQL = ((2 << 3) + 4) << kOpcodeShift,
BNEL = ((2 << 3) + 5) << kOpcodeShift,
BLEZL = ((2 << 3) + 6) << kOpcodeShift,
cc_always = 16,
- // aliases
+ // Aliases.
carry = Uless,
not_carry = Ugreater_equal,
zero = equal,
// ----- Coprocessor conditions.
enum FPUCondition {
- F, // False
- UN, // Unordered
- EQ, // Equal
- UEQ, // Unordered or Equal
- OLT, // Ordered or Less Than
- ULT, // Unordered or Less Than
- OLE, // Ordered or Less Than or Equal
- ULE // Unordered or Less Than or Equal
+ F, // False.
+ UN, // Unordered.
+ EQ, // Equal.
+ UEQ, // Unordered or Equal.
+ OLT, // Ordered or Less Than.
+ ULT, // Unordered or Less Than.
+ OLE, // Ordered or Less Than or Equal.
+ ULE // Unordered or Less Than or Equal.
};
extern const Instr kPushInstruction;
// sw(r, MemOperand(sp, 0))
extern const Instr kPushRegPattern;
-// lw(r, MemOperand(sp, 0))
+// lw(r, MemOperand(sp, 0))
extern const Instr kPopRegPattern;
extern const Instr kLwRegFpOffsetPattern;
extern const Instr kSwRegFpOffsetPattern;
// reference to an instruction is to convert a pointer. There is no way
// to allocate or create instances of class Instruction.
// Use the At(pc) function to create references to Instruction.
- static Instruction* At(byte_* pc) {
+ static Instruction* At(byte* pc) {
return reinterpret_cast<Instruction*>(pc);
}
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
void CPU::Setup() {
- CpuFeatures* cpu_features = Isolate::Current()->cpu_features();
- cpu_features->Probe(true);
- if (!cpu_features->IsSupported(FPU) || Serializer::enabled()) {
- V8::DisableCrankshaft();
- }
+ CpuFeatures::Probe();
+}
+
+
+bool CPU::SupportsCrankshaft() {
+ return CpuFeatures::IsSupported(FPU);
}
void CPU::FlushICache(void* start, size_t size) {
+ // Nothing to do: no instructions to flush.
+ if (size == 0) {
+ return;
+ }
+
#if !defined (USE_SIMULATOR)
int res;
- // See http://www.linux-mips.org/wiki/Cacheflush_Syscall
+ // See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
res = syscall(__NR_cacheflush, start, size, ICACHE);
if (res) {
#if defined(V8_TARGET_ARCH_MIPS)
-#include "codegen-inl.h"
+#include "codegen.h"
#include "debug.h"
namespace v8 {
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// NameConverter converter;
// Disassembler d(converter);
-// for (byte_* pc = begin; pc < end;) {
+// for (byte* pc = begin; pc < end;) {
// v8::internal::EmbeddedVector<char, 256> buffer;
// byte* prev_pc = pc;
// pc += d.InstructionDecode(buffer, pc);
// Writes one disassembled instruction into 'buffer' (0-terminated).
// Returns the length of the disassembled machine instruction in bytes.
- int InstructionDecode(byte_* instruction);
+ int InstructionDecode(byte* instruction);
private:
// Bottleneck functions to print into the out_buffer.
void PrintFd(Instruction* instr);
void PrintSa(Instruction* instr);
void PrintSd(Instruction* instr);
+ void PrintSs1(Instruction* instr);
+ void PrintSs2(Instruction* instr);
void PrintBc(Instruction* instr);
void PrintCc(Instruction* instr);
void PrintFunction(Instruction* instr);
}
-// Print the integer value of the rd field, (when it is not used as reg).
+// Print the integer value of the rd field, when it is not used as reg.
void Decoder::PrintSd(Instruction* instr) {
int sd = instr->RdValue();
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sd);
}
+// Print the integer value of the rd field, when used as 'ext' size.
+void Decoder::PrintSs1(Instruction* instr) {
+ int ss = instr->RdValue();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss + 1);
+}
+
+
+// Print the integer value of the rd field, when used as 'ins' size.
+void Decoder::PrintSs2(Instruction* instr) {
+ int ss = instr->RdValue();
+ int pos = instr->SaValue();
+ out_buffer_pos_ +=
+ OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss - pos + 1);
+}
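+
+// Worked example (illustrative): "ins 'rt, 'rs, 2, 5" encodes lsb = 2 in the
+// sa field and msb = pos + size - 1 = 6 in the rd field, so PrintSs2 recovers
+// the size as 6 - 2 + 1 = 5; for "ext" the rd field holds size - 1, hence
+// the "+ 1" in PrintSs1.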
+
+
// Print the integer value of the cc field for the bc1t/f instructions.
void Decoder::PrintBc(Instruction* instr) {
int cc = instr->FBccValue();
// Print 16-bit signed immediate value.
void Decoder::PrintSImm16(Instruction* instr) {
- int32_t imm = ((instr->Imm16Value())<<16)>>16;
+ int32_t imm = ((instr->Imm16Value()) << 16) >> 16;
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
}
// complexity of FormatOption.
int Decoder::FormatRegister(Instruction* instr, const char* format) {
ASSERT(format[0] == 'r');
- if (format[1] == 's') { // 'rs: Rs register
+ if (format[1] == 's') { // 'rs: Rs register.
int reg = instr->RsValue();
PrintRegister(reg);
return 2;
- } else if (format[1] == 't') { // 'rt: rt register
+ } else if (format[1] == 't') { // 'rt: rt register.
int reg = instr->RtValue();
PrintRegister(reg);
return 2;
- } else if (format[1] == 'd') { // 'rd: rd register
+ } else if (format[1] == 'd') { // 'rd: rd register.
int reg = instr->RdValue();
PrintRegister(reg);
return 2;
// complexity of FormatOption.
int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
ASSERT(format[0] == 'f');
- if (format[1] == 's') { // 'fs: fs register
+ if (format[1] == 's') { // 'fs: fs register.
int reg = instr->FsValue();
PrintFPURegister(reg);
return 2;
- } else if (format[1] == 't') { // 'ft: ft register
+ } else if (format[1] == 't') { // 'ft: ft register.
int reg = instr->FtValue();
PrintFPURegister(reg);
return 2;
- } else if (format[1] == 'd') { // 'fd: fd register
+ } else if (format[1] == 'd') { // 'fd: fd register.
int reg = instr->FdValue();
PrintFPURegister(reg);
return 2;
// characters that were consumed from the formatting string.
int Decoder::FormatOption(Instruction* instr, const char* format) {
switch (format[0]) {
- case 'c': { // 'code for break or trap instructions
+ case 'c': { // 'code for break or trap instructions.
ASSERT(STRING_STARTS_WITH(format, "code"));
PrintCode(instr);
return 4;
}
- case 'i': { // 'imm16u or 'imm26
+ case 'i': { // 'imm16u or 'imm26.
if (format[3] == '1') {
ASSERT(STRING_STARTS_WITH(format, "imm16"));
if (format[5] == 's') {
return 5;
}
}
- case 'r': { // 'r: registers
+ case 'r': { // 'r: registers.
return FormatRegister(instr, format);
}
- case 'f': { // 'f: FPUregisters
+ case 'f': { // 'f: FPUregisters.
return FormatFPURegister(instr, format);
}
- case 's': { // 'sa
+ case 's': { // 'sa.
switch (format[1]) {
case 'a': {
ASSERT(STRING_STARTS_WITH(format, "sa"));
PrintSd(instr);
return 2;
}
+ case 's': {
+ if (format[2] == '1') {
+ ASSERT(STRING_STARTS_WITH(format, "ss1")); /* ext size */
+ PrintSs1(instr);
+ return 3;
+ } else {
+ ASSERT(STRING_STARTS_WITH(format, "ss2")); /* ins size */
+ PrintSs2(instr);
+ return 3;
+ }
+ }
}
}
case 'b': { // 'bc - Special for bc1 cc field.
void Decoder::DecodeTypeRegister(Instruction* instr) {
switch (instr->OpcodeFieldRaw()) {
- case COP1: // Coprocessor instructions
+ case COP1: // Coprocessor instructions.
switch (instr->RsFieldRaw()) {
case BC1: // bc1 handled in DecodeTypeImmediate.
UNREACHABLE();
break;
case MFC1:
- Format(instr, "mfc1 'rt, 'fs");
+ Format(instr, "mfc1 'rt, 'fs");
break;
case MFHC1:
- Format(instr, "mfhc1 'rt, 'fs");
+ Format(instr, "mfhc1 'rt, 'fs");
break;
case MTC1:
- Format(instr, "mtc1 'rt, 'fs");
+ Format(instr, "mtc1 'rt, 'fs");
break;
// These are called "fs" too, although they are not FPU registers.
case CTC1:
- Format(instr, "ctc1 'rt, 'fs");
+ Format(instr, "ctc1 'rt, 'fs");
break;
case CFC1:
- Format(instr, "cfc1 'rt, 'fs");
+ Format(instr, "cfc1 'rt, 'fs");
break;
case MTHC1:
- Format(instr, "mthc1 'rt, 'fs");
+ Format(instr, "mthc1 'rt, 'fs");
break;
case D:
switch (instr->FunctionFieldRaw()) {
Format(instr, "neg.d 'fd, 'fs");
break;
case SQRT_D:
- Format(instr, "sqrt.d 'fd, 'fs");
+ Format(instr, "sqrt.d 'fd, 'fs");
break;
case CVT_W_D:
Format(instr, "cvt.w.d 'fd, 'fs");
case SPECIAL:
switch (instr->FunctionFieldRaw()) {
case JR:
- Format(instr, "jr 'rs");
+ Format(instr, "jr 'rs");
break;
case JALR:
- Format(instr, "jalr 'rs");
+ Format(instr, "jalr 'rs");
break;
case SLL:
if ( 0x0 == static_cast<int>(instr->InstructionBits()))
Format(instr, "nop");
else
- Format(instr, "sll 'rd, 'rt, 'sa");
+ Format(instr, "sll 'rd, 'rt, 'sa");
break;
case SRL:
if (instr->RsValue() == 0) {
- Format(instr, "srl 'rd, 'rt, 'sa");
+ Format(instr, "srl 'rd, 'rt, 'sa");
} else {
if (mips32r2) {
- Format(instr, "rotr 'rd, 'rt, 'sa");
+ Format(instr, "rotr 'rd, 'rt, 'sa");
} else {
Unknown(instr);
}
}
break;
case SRA:
- Format(instr, "sra 'rd, 'rt, 'sa");
+ Format(instr, "sra 'rd, 'rt, 'sa");
break;
case SLLV:
- Format(instr, "sllv 'rd, 'rt, 'rs");
+ Format(instr, "sllv 'rd, 'rt, 'rs");
break;
case SRLV:
if (instr->SaValue() == 0) {
- Format(instr, "srlv 'rd, 'rt, 'rs");
+ Format(instr, "srlv 'rd, 'rt, 'rs");
} else {
if (mips32r2) {
- Format(instr, "rotrv 'rd, 'rt, 'rs");
+ Format(instr, "rotrv 'rd, 'rt, 'rs");
} else {
Unknown(instr);
}
}
break;
case SRAV:
- Format(instr, "srav 'rd, 'rt, 'rs");
+ Format(instr, "srav 'rd, 'rt, 'rs");
break;
case MFHI:
- Format(instr, "mfhi 'rd");
+ Format(instr, "mfhi 'rd");
break;
case MFLO:
- Format(instr, "mflo 'rd");
+ Format(instr, "mflo 'rd");
break;
case MULT:
- Format(instr, "mult 'rs, 'rt");
+ Format(instr, "mult 'rs, 'rt");
break;
case MULTU:
- Format(instr, "multu 'rs, 'rt");
+ Format(instr, "multu 'rs, 'rt");
break;
case DIV:
- Format(instr, "div 'rs, 'rt");
+ Format(instr, "div 'rs, 'rt");
break;
case DIVU:
- Format(instr, "divu 'rs, 'rt");
+ Format(instr, "divu 'rs, 'rt");
break;
case ADD:
- Format(instr, "add 'rd, 'rs, 'rt");
+ Format(instr, "add 'rd, 'rs, 'rt");
break;
case ADDU:
- Format(instr, "addu 'rd, 'rs, 'rt");
+ Format(instr, "addu 'rd, 'rs, 'rt");
break;
case SUB:
- Format(instr, "sub 'rd, 'rs, 'rt");
+ Format(instr, "sub 'rd, 'rs, 'rt");
break;
case SUBU:
- Format(instr, "sub 'rd, 'rs, 'rt");
+ Format(instr, "subu 'rd, 'rs, 'rt");
break;
case AND:
- Format(instr, "and 'rd, 'rs, 'rt");
+ Format(instr, "and 'rd, 'rs, 'rt");
break;
case OR:
if (0 == instr->RsValue()) {
- Format(instr, "mov 'rd, 'rt");
+ Format(instr, "mov 'rd, 'rt");
} else if (0 == instr->RtValue()) {
- Format(instr, "mov 'rd, 'rs");
+ Format(instr, "mov 'rd, 'rs");
} else {
- Format(instr, "or 'rd, 'rs, 'rt");
+ Format(instr, "or 'rd, 'rs, 'rt");
}
break;
case XOR:
- Format(instr, "xor 'rd, 'rs, 'rt");
+ Format(instr, "xor 'rd, 'rs, 'rt");
break;
case NOR:
- Format(instr, "nor 'rd, 'rs, 'rt");
+ Format(instr, "nor 'rd, 'rs, 'rt");
break;
case SLT:
- Format(instr, "slt 'rd, 'rs, 'rt");
+ Format(instr, "slt 'rd, 'rs, 'rt");
break;
case SLTU:
- Format(instr, "sltu 'rd, 'rs, 'rt");
+ Format(instr, "sltu 'rd, 'rs, 'rt");
break;
case BREAK:
Format(instr, "break, code: 'code");
break;
case TGE:
- Format(instr, "tge 'rs, 'rt, code: 'code");
+ Format(instr, "tge 'rs, 'rt, code: 'code");
break;
case TGEU:
- Format(instr, "tgeu 'rs, 'rt, code: 'code");
+ Format(instr, "tgeu 'rs, 'rt, code: 'code");
break;
case TLT:
- Format(instr, "tlt 'rs, 'rt, code: 'code");
+ Format(instr, "tlt 'rs, 'rt, code: 'code");
break;
case TLTU:
- Format(instr, "tltu 'rs, 'rt, code: 'code");
+ Format(instr, "tltu 'rs, 'rt, code: 'code");
break;
case TEQ:
- Format(instr, "teq 'rs, 'rt, code: 'code");
+ Format(instr, "teq 'rs, 'rt, code: 'code");
break;
case TNE:
- Format(instr, "tne 'rs, 'rt, code: 'code");
+ Format(instr, "tne 'rs, 'rt, code: 'code");
break;
case MOVZ:
- Format(instr, "movz 'rd, 'rs, 'rt");
+ Format(instr, "movz 'rd, 'rs, 'rt");
break;
case MOVN:
- Format(instr, "movn 'rd, 'rs, 'rt");
+ Format(instr, "movn 'rd, 'rs, 'rt");
break;
case MOVCI:
if (instr->Bit(16)) {
- Format(instr, "movt 'rd, 'rs, 'Cc");
+ Format(instr, "movt 'rd, 'rs, 'bc");
} else {
- Format(instr, "movf 'rd, 'rs, 'Cc");
+ Format(instr, "movf 'rd, 'rs, 'bc");
}
break;
default:
case SPECIAL2:
switch (instr->FunctionFieldRaw()) {
case MUL:
- Format(instr, "mul 'rd, 'rs, 'rt");
+ Format(instr, "mul 'rd, 'rs, 'rt");
break;
case CLZ:
- Format(instr, "clz 'rd, 'rs");
+ Format(instr, "clz 'rd, 'rs");
break;
default:
UNREACHABLE();
switch (instr->FunctionFieldRaw()) {
case INS: {
if (mips32r2) {
- Format(instr, "ins 'rt, 'rs, 'sd, 'sa");
+ Format(instr, "ins 'rt, 'rs, 'sa, 'ss2");
} else {
Unknown(instr);
}
}
case EXT: {
if (mips32r2) {
- Format(instr, "ext 'rt, 'rs, 'sd, 'sa");
+ Format(instr, "ext 'rt, 'rs, 'sa, 'ss1");
} else {
Unknown(instr);
}
case REGIMM:
switch (instr->RtFieldRaw()) {
case BLTZ:
- Format(instr, "bltz 'rs, 'imm16u");
+ Format(instr, "bltz 'rs, 'imm16u");
break;
case BLTZAL:
- Format(instr, "bltzal 'rs, 'imm16u");
+ Format(instr, "bltzal 'rs, 'imm16u");
break;
case BGEZ:
- Format(instr, "bgez 'rs, 'imm16u");
+ Format(instr, "bgez 'rs, 'imm16u");
break;
case BGEZAL:
- Format(instr, "bgezal 'rs, 'imm16u");
+ Format(instr, "bgezal 'rs, 'imm16u");
break;
default:
UNREACHABLE();
break; // Case REGIMM.
// ------------- Branch instructions.
case BEQ:
- Format(instr, "beq 'rs, 'rt, 'imm16u");
+ Format(instr, "beq 'rs, 'rt, 'imm16u");
break;
case BNE:
- Format(instr, "bne 'rs, 'rt, 'imm16u");
+ Format(instr, "bne 'rs, 'rt, 'imm16u");
break;
case BLEZ:
- Format(instr, "blez 'rs, 'imm16u");
+ Format(instr, "blez 'rs, 'imm16u");
break;
case BGTZ:
- Format(instr, "bgtz 'rs, 'imm16u");
+ Format(instr, "bgtz 'rs, 'imm16u");
break;
// ------------- Arithmetic instructions.
case ADDI:
- Format(instr, "addi 'rt, 'rs, 'imm16s");
+ Format(instr, "addi 'rt, 'rs, 'imm16s");
break;
case ADDIU:
- Format(instr, "addiu 'rt, 'rs, 'imm16s");
+ Format(instr, "addiu 'rt, 'rs, 'imm16s");
break;
case SLTI:
- Format(instr, "slti 'rt, 'rs, 'imm16s");
+ Format(instr, "slti 'rt, 'rs, 'imm16s");
break;
case SLTIU:
- Format(instr, "sltiu 'rt, 'rs, 'imm16u");
+ Format(instr, "sltiu 'rt, 'rs, 'imm16u");
break;
case ANDI:
- Format(instr, "andi 'rt, 'rs, 'imm16x");
+ Format(instr, "andi 'rt, 'rs, 'imm16x");
break;
case ORI:
- Format(instr, "ori 'rt, 'rs, 'imm16x");
+ Format(instr, "ori 'rt, 'rs, 'imm16x");
break;
case XORI:
- Format(instr, "xori 'rt, 'rs, 'imm16x");
+ Format(instr, "xori 'rt, 'rs, 'imm16x");
break;
case LUI:
- Format(instr, "lui 'rt, 'imm16x");
+ Format(instr, "lui 'rt, 'imm16x");
break;
// ------------- Memory instructions.
case LB:
- Format(instr, "lb 'rt, 'imm16s('rs)");
+ Format(instr, "lb 'rt, 'imm16s('rs)");
break;
case LH:
- Format(instr, "lh 'rt, 'imm16s('rs)");
+ Format(instr, "lh 'rt, 'imm16s('rs)");
break;
case LWL:
- Format(instr, "lwl 'rt, 'imm16s('rs)");
+ Format(instr, "lwl 'rt, 'imm16s('rs)");
break;
case LW:
- Format(instr, "lw 'rt, 'imm16s('rs)");
+ Format(instr, "lw 'rt, 'imm16s('rs)");
break;
case LBU:
- Format(instr, "lbu 'rt, 'imm16s('rs)");
+ Format(instr, "lbu 'rt, 'imm16s('rs)");
break;
case LHU:
- Format(instr, "lhu 'rt, 'imm16s('rs)");
+ Format(instr, "lhu 'rt, 'imm16s('rs)");
break;
case LWR:
- Format(instr, "lwr 'rt, 'imm16s('rs)");
+ Format(instr, "lwr 'rt, 'imm16s('rs)");
break;
case SB:
- Format(instr, "sb 'rt, 'imm16s('rs)");
+ Format(instr, "sb 'rt, 'imm16s('rs)");
break;
case SH:
- Format(instr, "sh 'rt, 'imm16s('rs)");
+ Format(instr, "sh 'rt, 'imm16s('rs)");
break;
case SWL:
- Format(instr, "swl 'rt, 'imm16s('rs)");
+ Format(instr, "swl 'rt, 'imm16s('rs)");
break;
case SW:
- Format(instr, "sw 'rt, 'imm16s('rs)");
+ Format(instr, "sw 'rt, 'imm16s('rs)");
break;
case SWR:
- Format(instr, "swr 'rt, 'imm16s('rs)");
+ Format(instr, "swr 'rt, 'imm16s('rs)");
break;
case LWC1:
- Format(instr, "lwc1 'ft, 'imm16s('rs)");
+ Format(instr, "lwc1 'ft, 'imm16s('rs)");
break;
case LDC1:
- Format(instr, "ldc1 'ft, 'imm16s('rs)");
+ Format(instr, "ldc1 'ft, 'imm16s('rs)");
break;
case SWC1:
- Format(instr, "swc1 'ft, 'imm16s('rs)");
+ Format(instr, "swc1 'ft, 'imm16s('rs)");
break;
case SDC1:
- Format(instr, "sdc1 'ft, 'imm16s('rs)");
+ Format(instr, "sdc1 'ft, 'imm16s('rs)");
break;
default:
UNREACHABLE();
void Decoder::DecodeTypeJump(Instruction* instr) {
switch (instr->OpcodeFieldRaw()) {
case J:
- Format(instr, "j 'imm26");
+ Format(instr, "j 'imm26");
break;
case JAL:
- Format(instr, "jal 'imm26");
+ Format(instr, "jal 'imm26");
break;
default:
UNREACHABLE();
// Disassemble the instruction at *instr_ptr into the output buffer.
-int Decoder::InstructionDecode(byte_* instr_ptr) {
+int Decoder::InstructionDecode(byte* instr_ptr) {
Instruction* instr = Instruction::At(instr_ptr);
// Print raw instruction bytes.
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
namespace disasm {
-using v8::internal::byte_;
-
-const char* NameConverter::NameOfAddress(byte_* addr) const {
+const char* NameConverter::NameOfAddress(byte* addr) const {
v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
return tmp_buffer_.start();
}
-const char* NameConverter::NameOfConstant(byte_* addr) const {
+const char* NameConverter::NameOfConstant(byte* addr) const {
return NameOfAddress(addr);
}
const char* NameConverter::NameOfByteCPURegister(int reg) const {
- UNREACHABLE(); // MIPS does not have the concept of a byte register
+ UNREACHABLE(); // MIPS does not have the concept of a byte register.
return "nobytereg";
}
-const char* NameConverter::NameInCode(byte_* addr) const {
+const char* NameConverter::NameInCode(byte* addr) const {
// The default name converter is called for unknown code. So we will not try
// to access any memory.
return "";
int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
- byte_* instruction) {
+ byte* instruction) {
v8::internal::Decoder d(converter_, buffer);
return d.InstructionDecode(instruction);
}
// The MIPS assembler does not currently use constant pools.
-int Disassembler::ConstantPoolSizeAt(byte_* instruction) {
+int Disassembler::ConstantPoolSizeAt(byte* instruction) {
return -1;
}
-void Disassembler::Disassemble(FILE* f, byte_* begin, byte_* end) {
+void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
NameConverter converter;
Disassembler d(converter);
- for (byte_* pc = begin; pc < end;) {
+ for (byte* pc = begin; pc < end;) {
v8::internal::EmbeddedVector<char, 128> buffer;
buffer[0] = '\0';
- byte_* prev_pc = pc;
+ byte* prev_pc = pc;
pc += d.InstructionDecode(buffer, pc);
fprintf(f, "%p %08x %s\n",
prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
Address ExitFrame::ComputeStackPointer(Address fp) {
- UNIMPLEMENTED_MIPS();
- return fp;
+ Address marker = Memory::Address_at(fp + ExitFrameConstants::kMarkerOffset);
+ Address sp = fp + ExitFrameConstants::kSPOffset;
+ if (marker == NULL) {
+ sp -= FPURegister::kNumRegisters * kDoubleSize + 2 * kPointerSize;
+ }
+ return sp;
}
// Saved temporaries.
1 << 16 | 1 << 17 | 1 << 18 | 1 << 19 |
1 << 20 | 1 << 21 | 1 << 22 | 1 << 23 |
- // gp, sp, fp
+ // gp, sp, fp.
1 << 28 | 1 << 29 | 1 << 30;
static const int kNumCalleeSaved = 11;
static const int kRegularArgsSlotsSize = kRArgsSlotsSize;
// C/C++ argument slots size.
- static const int kCArgsSlotsSize = 4 * kPointerSize;
+ static const int kCArgSlotCount = 4;
+ static const int kCArgsSlotsSize = kCArgSlotCount * kPointerSize;
// JS argument slots size.
static const int kJSArgsSlotsSize = 0 * kPointerSize;
// Assembly builtins argument slots size.
// next call: mov(a0, v0). This is not needed on the other architectures.
#include "code-stubs.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "compiler.h"
#include "debug.h"
#include "full-codegen.h"
}
-void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode,
Expression* left,
}
-void FullCodeGenerator::EmitBinaryOp(Token::Value op,
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
+ Token::Value op,
OverwriteMode mode) {
UNIMPLEMENTED_MIPS();
}
}
-void FullCodeGenerator::EmitCallWithStub(Call* expr) {
+void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
UNIMPLEMENTED_MIPS();
}
}
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
+ RelocInfo::Mode mode,
+ unsigned ast_id) {
UNIMPLEMENTED_MIPS();
}
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
#if defined(V8_TARGET_ARCH_MIPS)
-#include "codegen-inl.h"
+#include "codegen.h"
#include "code-stubs.h"
#include "ic-inl.h"
#include "runtime.h"
}
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
- UNIMPLEMENTED_MIPS();
- return false;
-}
-
-
-bool LoadIC::PatchInlinedContextualLoad(Address address,
- Object* map,
- Object* cell,
- bool is_dont_delete) {
- UNIMPLEMENTED_MIPS();
- return false;
-}
-
-
-bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
- UNIMPLEMENTED_MIPS();
- return false;
-}
-
-
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
- UNIMPLEMENTED_MIPS();
- return false;
-}
-
-
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
- UNIMPLEMENTED_MIPS();
- return false;
-}
-
-
Object* KeyedLoadIC_Miss(Arguments args);
+++ /dev/null
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// JumpTarget implementation.
-
-#define __ ACCESS_MASM(cgen()->masm())
-
-// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
-#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
- (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
- (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
-
-
-void JumpTarget::DoJump() {
- UNIMPLEMENTED_MIPS();
-}
-
-// Original prototype for mips, needs arch-indep change. Leave out for now.
-// void JumpTarget::DoBranch(Condition cc, Hint ignored,
-// Register src1, const Operand& src2) {
-void JumpTarget::DoBranch(Condition cc, Hint ignored) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void JumpTarget::Call() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void JumpTarget::DoBind() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-#undef __
-#undef BRANCH_ARGS_CHECK
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
bool HasEnvironment() const {
UNIMPLEMENTED();
- return NULL;
+ return false;
}
virtual void PrintTo(StringStream* stream) const { UNIMPLEMENTED(); }
class LChunk: public ZoneObject {
public:
- explicit LChunk(CompilationInfo* info, HGraph* graph) { }
+ explicit LChunk(HGraph* graph) { }
HGraph* graph() const {
UNIMPLEMENTED();
return NULL;
}
- CompilationInfo* info() const { return NULL; }
-
const ZoneList<LPointerMap*>* pointer_maps() const {
UNIMPLEMENTED();
return NULL;
void MarkEmptyBlocks() { UNIMPLEMENTED(); }
+ CompilationInfo* info() const {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
#ifdef DEBUG
void Verify() { UNIMPLEMENTED(); }
#endif
class LChunkBuilder BASE_EMBEDDED {
public:
- LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator) { }
+ LChunkBuilder(CompilationInfo*&, HGraph* graph, LAllocator* allocator) { }
// Build the sequence for the graph.
LChunk* Build() {
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <limits.h> // For LONG_MIN, LONG_MAX
+#include <limits.h> // For LONG_MIN, LONG_MAX.
#include "v8.h"
#if defined(V8_TARGET_ARCH_MIPS)
#include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "debug.h"
#include "runtime.h"
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(void* buffer, int size)
- : Assembler(buffer, size),
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+ : Assembler(arg_isolate, buffer, size),
generating_stub_(false),
- allow_stub_calls_(true),
- code_object_(HEAP->undefined_value()) {
+ allow_stub_calls_(true) {
+ if (isolate() != NULL) {
+ code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
+ isolate());
+ }
}
-// Arguments macros
+// Arguments macros.
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
#define COND_ARGS cond, r1, r2
void MacroAssembler::RecordWriteHelper(Register object,
Register address,
Register scratch) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Check that the object is not in new space.
Label not_in_new_space;
InNewSpace(object, scratch, ne, ¬_in_new_space);
// Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
li(object, Operand(BitCast<int32_t>(kZapValue)));
li(scratch0, Operand(BitCast<int32_t>(kZapValue)));
li(scratch1, Operand(BitCast<int32_t>(kZapValue)));
// Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
li(object, Operand(BitCast<int32_t>(kZapValue)));
li(address, Operand(BitCast<int32_t>(kZapValue)));
li(scratch, Operand(BitCast<int32_t>(kZapValue)));
// -----------------------------------------------------------------------------
-// Allocation support
+// Allocation support.
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
// Check the context is a global context.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// TODO(119): Avoid push(holder_reg)/pop(holder_reg).
- Push(holder_reg); // Temporarily save holder on the stack.
+ push(holder_reg); // Temporarily save holder on the stack.
// Read the first word and compare to the global_context_map.
lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
LoadRoot(at, Heap::kGlobalContextMapRootIndex);
Check(eq, "JSGlobalObject::global_context should be a global context.",
holder_reg, Operand(at));
- Pop(holder_reg); // Restore holder.
+ pop(holder_reg); // Restore holder.
}
// Check if both contexts are the same.
Branch(&same_contexts, eq, scratch, Operand(at));
// Check the context is a global context.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// TODO(119): Avoid push(holder_reg)/pop(holder_reg).
- Push(holder_reg); // Temporarily save holder on the stack.
+ push(holder_reg); // Temporarily save holder on the stack.
mov(holder_reg, at); // Move at to its holding place.
LoadRoot(at, Heap::kNullValueRootIndex);
Check(ne, "JSGlobalProxy::context() should not be null.",
Check(eq, "JSGlobalObject::global_context should be a global context.",
holder_reg, Operand(at));
// Restore at is not needed. at is reloaded below.
- Pop(holder_reg); // Restore holder.
+ pop(holder_reg); // Restore holder.
// Restore at to holder's context.
lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
}
// ---------------------------------------------------------------------------
-// Instruction macros
+// Instruction macros.
void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
}
+void MacroAssembler::Neg(Register rs, const Operand& rt) {
+ ASSERT(rt.is_reg());
+ ASSERT(!at.is(rs));
+ ASSERT(!at.is(rt.rm()));
+ li(at, -1);
+ xor_(rs, rt.rm(), at);
+}
+
+
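// Note on the sequence above: xor with an all-ones word yields the one's
// complement, so this macro computes rs = ~rt. A minimal sketch of the
// identity (not part of this patch), assuming 32-bit two's complement:
//
//   int32_t x = 42;
//   assert((x ^ -1) == ~x);    // Xor with -1 is bitwise NOT.
//   assert(-x == ~x + 1);      // Arithmetic negation adds one.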
void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
slt(rd, rs, rt.rm());
}
// We need always the same number of instructions as we may need to patch
// this code to load another value which may need 2 instructions to load.
- if (is_int16(j.imm32_)) {
- nop();
- addiu(rd, zero_reg, j.imm32_);
- } else if (!(j.imm32_ & kHiMask)) {
- nop();
- ori(rd, zero_reg, j.imm32_);
- } else if (!(j.imm32_ & kImm16Mask)) {
- nop();
- lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
- } else {
- lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
- ori(rd, rd, (j.imm32_ & kImm16Mask));
- }
+ lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
+ ori(rd, rd, (j.imm32_ & kImm16Mask));
}
}
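// A sketch of the fixed-length load emitted above (hypothetical value, not
// part of this patch): the 32-bit immediate is always composed from its two
// halves, so the pair can later be patched in place to hold another value.
//
//   li(rd, 0x12345678, true);      // gen2instr
//   // emits: lui rd, 0x1234       ; rd = 0x12340000
//   //        ori rd, rd, 0x5678   ; rd = 0x12345678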
-// Exception-generating instructions and debugging support
+// Exception-generating instructions and debugging support.
void MacroAssembler::stop(const char* msg) {
// TO_UPGRADE: Just a break for now. Maybe we could upgrade it.
// We use the 0x54321 value to be able to find it easily when reading memory.
ASSERT(!rs.is(t9));
ASSERT(!rs.is(t8));
- // Save rs's MSB to t8
+ // Save rs's MSB to t8.
And(t8, rs, 0x80000000);
// Remove rs's MSB.
And(t9, rs, 0x7FFFFFFF);
- // Move t9 to fd
+ // Move t9 to fd.
mtc1(t9, fd);
// Convert fd to a real FP value.
Subu(scratch2, scratch2, Operand(zero_exponent));
// Dest already has a Smi zero.
Branch(&done, lt, scratch2, Operand(zero_reg));
- if (!Isolate::Current()->cpu_features()->IsSupported(FPU)) {
+ if (!CpuFeatures::IsSupported(FPU)) {
// We have a shifted exponent between 0 and 30 in scratch2.
srl(dest, scratch2, HeapNumber::kExponentShift);
// We now have the exponent in dest. Subtract from 30 to get
subu(dest, at, dest);
}
bind(&right_exponent);
- if (Isolate::Current()->cpu_features()->IsSupported(FPU)) {
+ if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
// MIPS FPU instructions implementing double precision to integer
// conversion using round to zero. Since the FP value was qualified
}
+void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
+ Register input_high,
+ Register input_low,
+ Register scratch) {
+ Label done, normal_exponent, restore_sign;
+ // Extract the biased exponent in result.
+ Ext(result,
+ input_high,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+
+ // Check for Infinity and NaNs, which should return 0.
+ Subu(scratch, result, HeapNumber::kExponentMask);
+ movz(result, zero_reg, scratch);
+ Branch(&done, eq, scratch, Operand(zero_reg));
+
+ // Express exponent as delta to (number of mantissa bits + 31).
+ Subu(result,
+ result,
+ Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
+
+ // If the delta is strictly positive, all bits would be shifted away,
+ // which means that we can return 0.
+ Branch(&normal_exponent, le, result, Operand(zero_reg));
+ mov(result, zero_reg);
+ Branch(&done);
+
+ bind(&normal_exponent);
+ const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
+ // Calculate shift.
+ Addu(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits));
+
+ // Save the sign.
+ Register sign = result;
+ result = no_reg;
+ And(sign, input_high, Operand(HeapNumber::kSignMask));
+
+ // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
+ // to check for this specific case.
+ Label high_shift_needed, high_shift_done;
+ Branch(&high_shift_needed, lt, scratch, Operand(32));
+ mov(input_high, zero_reg);
+ Branch(&high_shift_done);
+ bind(&high_shift_needed);
+
+ // Set the implicit 1 before the mantissa part in input_high.
+ Or(input_high,
+ input_high,
+ Operand(1 << HeapNumber::kMantissaBitsInTopWord));
+ // Shift the mantissa bits to the correct position.
+ // We don't need to clear non-mantissa bits as they will be shifted away.
+ // If they weren't, it would mean that the answer is in the 32-bit range.
+ sllv(input_high, input_high, scratch);
+
+ bind(&high_shift_done);
+
+ // Replace the shifted bits with bits from the lower mantissa word.
+ Label pos_shift, shift_done;
+ li(at, 32);
+ subu(scratch, at, scratch);
+ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
+
+ // Negate scratch.
+ Subu(scratch, zero_reg, scratch);
+ sllv(input_low, input_low, scratch);
+ Branch(&shift_done);
+
+ bind(&pos_shift);
+ srlv(input_low, input_low, scratch);
+
+ bind(&shift_done);
+ Or(input_high, input_high, Operand(input_low));
+ // Restore sign if necessary.
+ mov(scratch, sign);
+ result = sign;
+ sign = no_reg;
+ Subu(result, zero_reg, input_high);
+ movz(result, input_high, scratch);
+ bind(&done);
+}
+
+
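// A rough C++ model of the truncation above (a sketch, not part of this
// patch; 'hi'/'lo' are the raw words of an IEEE-754 double whose value is
// known to lie outside the int32 range):
//
//   int32_t TruncateOutOfRange(uint32_t hi, uint32_t lo) {
//     int exponent = (hi >> 20) & 0x7FF;                   // Biased exponent.
//     if (exponent == 0x7FF) return 0;                     // Inf/NaN -> 0.
//     uint64_t mantissa = ((uint64_t)(hi & 0xFFFFF) << 32) | lo;
//     mantissa |= (uint64_t)1 << 52;                       // Implicit 1.
//     int shift = exponent - 1075;                         // Bias 1023 + 52.
//     uint32_t bits = 0;                                   // All bits lost.
//     if (shift >= 0 && shift < 32) {
//       bits = (uint32_t)(mantissa << shift);
//     } else if (shift < 0 && shift > -53) {
//       bits = (uint32_t)(mantissa >> -shift);
//     }
//     return (hi & 0x80000000u) ? (int32_t)(0u - bits) : (int32_t)bits;
//   }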
+void MacroAssembler::GetLeastBitsFromSmi(Register dst,
+ Register src,
+ int num_least_bits) {
+ Ext(dst, src, kSmiTagSize, num_least_bits);
+}
+
+
+void MacroAssembler::GetLeastBitsFromInt32(Register dst,
+ Register src,
+ int num_least_bits) {
+ And(dst, src, Operand((1 << num_least_bits) - 1));
+}
+
+
// Emulated conditional branches do not emit a nop in the branch delay slot.
//
// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
case ne:
bne(rs, r2, offset);
break;
- // Signed comparison
+ // Signed comparison.
case greater:
if (r2.is(zero_reg)) {
bgtz(rs, offset);
li(r2, rt);
bne(rs, r2, offset);
break;
- // Signed comparison
+ // Signed comparison.
case greater:
if (rt.imm32_ == 0) {
bgtz(rs, offset);
offset = shifted_branch_offset(L, false);
bne(rs, r2, offset);
break;
- // Signed comparison
+ // Signed comparison.
case greater:
if (r2.is(zero_reg)) {
offset = shifted_branch_offset(L, false);
offset = shifted_branch_offset(L, false);
bne(rs, r2, offset);
break;
- // Signed comparison
+ // Signed comparison.
case greater:
if (rt.imm32_ == 0) {
offset = shifted_branch_offset(L, false);
bal(offset);
break;
- // Signed comparison
+ // Signed comparison.
case greater:
slt(scratch, r2, rs);
addiu(scratch, scratch, -1);
bal(offset);
break;
- // Signed comparison
+ // Signed comparison.
case greater:
slt(scratch, r2, rs);
addiu(scratch, scratch, -1);
Branch(2, NegateCondition(cond), rs, rt);
j(target.imm32_); // Will generate only one instruction.
}
- } else { // MustUseReg(target)
+ } else { // MustUseReg(target).
li(t9, target);
if (cond == cc_always) {
jr(t9);
int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
- UNIMPLEMENTED_MIPS();
- return 0;
+ return 4 * kInstrSize;
}
int MacroAssembler::CallSize(Register reg) {
- UNIMPLEMENTED_MIPS();
- return 0;
+ return 2 * kInstrSize;
}
BlockTrampolinePoolScope block_trampoline_pool(this);
if (target.is_reg()) {
jalr(target.rm());
- } else { // !target.is_reg()
+ } else { // !target.is_reg().
if (!MustUseReg(target.rmode_)) {
jal(target.imm32_);
- } else { // MustUseReg(target)
+ } else { // MustUseReg(target).
+ // Must record previous source positions before the
+ // li() generates a new code target.
+ positions_recorder()->WriteRecordedPositions();
li(t9, target);
jalr(t9);
}
Branch(2, NegateCondition(cond), rs, rt);
jalr(target.rm());
}
- } else { // !target.is_reg()
+ } else { // !target.is_reg().
if (!MustUseReg(target.rmode_)) {
if (cond == cc_always) {
jal(target.imm32_);
}
+void MacroAssembler::CallWithAstId(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ unsigned ast_id,
+ Condition cond,
+ Register r1,
+ const Operand& r2) {
+ ASSERT(rmode == RelocInfo::CODE_TARGET_WITH_ID);
+ ASSERT(ast_id != kNoASTId);
+ ASSERT(ast_id_for_reloc_info_ == kNoASTId);
+ ast_id_for_reloc_info_ = ast_id;
+ Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
+}
+
+
void MacroAssembler::Drop(int count,
Condition cond,
Register reg,
// ---------------------------------------------------------------------------
-// Exception handling
+// Exception handling.
void MacroAssembler::PushTryHandler(CodeLocation try_location,
HandlerType type) {
Label* gc_required,
AllocationFlags flags) {
if (!FLAG_inline_new) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
li(result, 0x7091);
li(scratch1, 0x7191);
lw(result, MemOperand(topaddr));
lw(t9, MemOperand(topaddr, kPointerSize));
} else {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Assert that result actually contains top on entry. t9 is used
// immediately below so this use of t9 does not cause difference with
// respect to register content between debug and release mode.
Label* gc_required,
AllocationFlags flags) {
if (!FLAG_inline_new) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
li(result, 0x7091);
li(scratch1, 0x7191);
lw(result, MemOperand(topaddr));
lw(t9, MemOperand(topaddr, kPointerSize));
} else {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Assert that result actually contains top on entry. t9 is used
// immediately below so this use of t9 does not cause difference with
// respect to register content between debug and release mode.
Branch(gc_required, Ugreater, scratch2, Operand(t9));
// Update allocation top. result temporarily holds the new top.
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
And(t9, scratch2, Operand(kObjectAlignmentMask));
Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg));
}
}
+void MacroAssembler::CopyBytes(Register src,
+ Register dst,
+ Register length,
+ Register scratch) {
+ Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
+
+ // Align src before copying in word size chunks.
+ bind(&align_loop);
+ Branch(&done, eq, length, Operand(zero_reg));
+ bind(&align_loop_1);
+ And(scratch, src, kPointerSize - 1);
+ Branch(&word_loop, eq, scratch, Operand(zero_reg));
+ lbu(scratch, MemOperand(src));
+ Addu(src, src, 1);
+ sb(scratch, MemOperand(dst));
+ Addu(dst, dst, 1);
+ Subu(length, length, Operand(1));
+ Branch(&byte_loop_1, ne, length, Operand(zero_reg));
+
+ // Copy bytes in word size chunks.
+ bind(&word_loop);
+ if (FLAG_debug_code) {
+ And(scratch, src, kPointerSize - 1);
+ Assert(eq, "Expecting alignment for CopyBytes",
+ scratch, Operand(zero_reg));
+ }
+ Branch(&byte_loop, lt, length, Operand(kPointerSize));
+ lw(scratch, MemOperand(src));
+ Addu(src, src, kPointerSize);
+
+ // TODO(kalmard) check if this can be optimized to use sw in most cases.
+ // Can't use unaligned access - copy byte by byte.
+ sb(scratch, MemOperand(dst, 0));
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 1));
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 2));
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 3));
+ Addu(dst, dst, 4);
+
+ Subu(length, length, Operand(kPointerSize));
+ Branch(&word_loop);
+
+ // Copy the last bytes if any left.
+ bind(&byte_loop);
+ Branch(&done, eq, length, Operand(zero_reg));
+ bind(&byte_loop_1);
+ lbu(scratch, MemOperand(src));
+ Addu(src, src, 1);
+ sb(scratch, MemOperand(dst));
+ Addu(dst, dst, 1);
+ Subu(length, length, Operand(1));
+ Branch(&byte_loop_1, ne, length, Operand(zero_reg));
+ bind(&done);
+}
+
+
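// A rough C++ model of the copy loop above (a sketch, not part of this
// patch; assumes a little-endian target, matching the byte order of the
// sb/srl store sequence):
//
//   void CopyBytesRef(const uint8_t* src, uint8_t* dst, size_t length) {
//     // Copy single bytes until src is word-aligned.
//     while (length > 0 && ((uintptr_t)src & 3) != 0) {
//       *dst++ = *src++; length--;
//     }
//     // Copy aligned words; dst may still be unaligned, so store bytewise.
//     while (length >= 4) {
//       uint32_t w = *(const uint32_t*)src; src += 4;
//       dst[0] = (uint8_t)w;         dst[1] = (uint8_t)(w >> 8);
//       dst[2] = (uint8_t)(w >> 16); dst[3] = (uint8_t)(w >> 24);
//       dst += 4; length -= 4;
//     }
//     // Copy the tail.
//     while (length-- > 0) *dst++ = *src++;
//   }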
void MacroAssembler::CheckMap(Register obj,
Register scratch,
Handle<Map> map,
}
+void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
+ if (IsMipsSoftFloatABI) {
+ mtc1(v0, dst);
+ mtc1(v1, FPURegister::from_code(dst.code() + 1));
+ } else {
+ if (!dst.is(f0)) {
+ mov_d(dst, f0); // Reg f0 is o32 ABI FP return value.
+ }
+ }
+}
+
+
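// Under the soft-float convention handled above, a C function returns a
// double in the integer pair v0/v1, so the value is moved into the FPU
// register; under o32 hard-float it already arrives in f0. Usage sketch
// (not part of this patch): after CallCFunction(...), a call to
// GetCFunctionDoubleResult(f4) leaves the double result in f4 either way.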
// -----------------------------------------------------------------------------
-// JavaScript invokes
+// JavaScript invokes.
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
li(a2, Operand(expected.immediate()));
}
}
+ } else if (actual.is_immediate()) {
+ Branch(®ular_invoke, eq, expected.reg(), Operand(actual.immediate()));
+ li(a0, Operand(actual.immediate()));
} else {
- if (actual.is_immediate()) {
- Branch(®ular_invoke, eq, expected.reg(), Operand(actual.immediate()));
- li(a0, Operand(actual.immediate()));
- } else {
- Branch(®ular_invoke, eq, expected.reg(), Operand(actual.reg()));
- }
+ Branch(®ular_invoke, eq, expected.reg(), Operand(actual.reg()));
}
if (!definitely_matches) {
// -----------------------------------------------------------------------------
-// Runtime calls
+// Runtime calls.
void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
Register r1, const Operand& r2) {
void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // stub calls are not allowed in some stubs
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
}
-
void MacroAssembler::SmiToDoubleFPURegister(Register smi,
FPURegister value,
Register scratch1) {
}
+void MacroAssembler::AdduAndCheckForOverflow(Register dst,
+ Register left,
+ Register right,
+ Register overflow_dst,
+ Register scratch) {
+ ASSERT(!dst.is(overflow_dst));
+ ASSERT(!dst.is(scratch));
+ ASSERT(!overflow_dst.is(scratch));
+ ASSERT(!overflow_dst.is(left));
+ ASSERT(!overflow_dst.is(right));
+ ASSERT(!left.is(right));
+
+ // TODO(kalmard) There must be a way to optimize dst == left and dst == right
+ // cases.
+
+ if (dst.is(left)) {
+ addu(overflow_dst, left, right);
+ xor_(dst, overflow_dst, left);
+ xor_(scratch, overflow_dst, right);
+ and_(scratch, scratch, dst);
+ mov(dst, overflow_dst);
+ mov(overflow_dst, scratch);
+ } else if (dst.is(right)) {
+ addu(overflow_dst, left, right);
+ xor_(dst, overflow_dst, right);
+ xor_(scratch, overflow_dst, left);
+ and_(scratch, scratch, dst);
+ mov(dst, overflow_dst);
+ mov(overflow_dst, scratch);
+ } else {
+ addu(dst, left, right);
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, dst, right);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+}
+
+
+void MacroAssembler::SubuAndCheckForOverflow(Register dst,
+ Register left,
+ Register right,
+ Register overflow_dst,
+ Register scratch) {
+ ASSERT(!dst.is(overflow_dst));
+ ASSERT(!dst.is(scratch));
+ ASSERT(!overflow_dst.is(scratch));
+ ASSERT(!overflow_dst.is(left));
+ ASSERT(!overflow_dst.is(right));
+ ASSERT(!left.is(right));
+ ASSERT(!scratch.is(left));
+ ASSERT(!scratch.is(right));
+
+ // TODO(kalmard) There must be a way to optimize dst == left and dst == right
+ // cases.
+
+ if (dst.is(left)) {
+ subu(overflow_dst, left, right);
+ xor_(scratch, overflow_dst, left);
+ xor_(dst, left, right);
+ and_(scratch, scratch, dst);
+ mov(dst, overflow_dst);
+ mov(overflow_dst, scratch);
+ } else if (dst.is(right)) {
+ subu(overflow_dst, left, right);
+ xor_(dst, left, right);
+ xor_(scratch, overflow_dst, left);
+ and_(scratch, scratch, dst);
+ mov(dst, overflow_dst);
+ mov(overflow_dst, scratch);
+ } else {
+ subu(dst, left, right);
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, left, right);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+}
+
+
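// A minimal model of the overflow test built above, assuming 32-bit
// two's-complement arithmetic (a sketch, not part of this patch; the macro
// leaves this value in overflow_dst, whose sign bit signals overflow):
//
//   bool AddWouldOverflow(int32_t left, int32_t right) {
//     uint32_t sum = (uint32_t)left + (uint32_t)right;    // Wraps silently.
//     // Overflow iff left and right share a sign bit that the sum lost.
//     return (int32_t)((sum ^ (uint32_t)left) & (sum ^ (uint32_t)right)) < 0;
//   }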
void MacroAssembler::CallRuntime(const Runtime::Function* f,
int num_arguments) {
// All parameters are on the stack. v0 has the return value after call.
// -----------------------------------------------------------------------------
-// Debugging
+// Debugging.
void MacroAssembler::Assert(Condition cc, const char* msg,
Register rs, Operand rt) {
- if (FLAG_debug_code)
+ if (emit_debug_code())
Check(cc, msg, rs, rt);
}
void MacroAssembler::AssertRegisterIsRoot(Register reg,
Heap::RootListIndex index) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
LoadRoot(at, index);
Check(eq, "Register did not match expected root", reg, Operand(at));
}
void MacroAssembler::AssertFastElements(Register elements) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
ASSERT(!elements.is(at));
Label ok;
- Push(elements);
+ push(elements);
lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
LoadRoot(at, Heap::kFixedArrayMapRootIndex);
Branch(&ok, eq, elements, Operand(at));
Branch(&ok, eq, elements, Operand(at));
Abort("JSObject with fast elements map has slow elements");
bind(&ok);
- Pop(elements);
+ pop(elements);
}
}
Label L;
Branch(&L, cc, rs, rt);
Abort(msg);
- // will not return here
+ // Will not return here.
bind(&L);
}
AllowStubCallsScope allow_scope(this, true);
li(a0, Operand(p0));
- Push(a0);
+ push(a0);
li(a0, Operand(Smi::FromInt(p1 - p0)));
- Push(a0);
+ push(a0);
CallRuntime(Runtime::kAbort, 2);
- // will not return here
+ // Will not return here.
if (is_trampoline_pool_blocked()) {
// If the calling code cares about the exact number of
// instructions generated, we insert padding here to keep the size
lw(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
}
- // The context may be an intermediate context, not a function context.
- lw(dst, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- } else { // Slot is in the current function context.
- // The context may be an intermediate context, not a function context.
- lw(dst, MemOperand(cp, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ } else {
+ // Slot is in the current function context. Move it into the
+ // destination register in case we store into it (the write barrier
+ // cannot be allowed to destroy the context in esi).
+ Move(dst, cp);
+ }
+
+ // We should not have found a 'with' context by walking the context chain
+ // (i.e., the static scope chain and runtime context chain do not agree).
+ // A variable occurring in such a scope should have slot type LOOKUP and
+ // not CONTEXT.
+ if (emit_debug_code()) {
+ lw(t9, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ Check(eq, "Yo dawg, I heard you liked function contexts "
+ "so I put function contexts in all your contexts",
+ dst, Operand(t9));
}
}
Register scratch) {
// Load the initial map. The global functions all have initial maps.
lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
Label ok, fail;
CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, false);
Branch(&ok);
// t9 = sp + kPointerSize * #args
addu(t9, sp, t8);
- // Compute the argv pointer and keep it in a callee-saved register.
- // This only seems to be needed for crankshaft and may cause problems
- // so it's disabled for now.
- // Subu(s6, t9, Operand(kPointerSize));
-
// Align the stack at this point.
AlignStack(0);
mov(fp, sp); // Setup new frame pointer.
li(t8, Operand(CodeObject()));
- Push(t8); // Accessed from ExitFrame::code_slot.
+ push(t8); // Accessed from ExitFrame::code_slot.
// Save the frame pointer and the context in top.
li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
}
+void MacroAssembler::AbortIfNotString(Register object) {
+ STATIC_ASSERT(kSmiTag == 0);
+ And(t0, object, Operand(kSmiTagMask));
+ Assert(ne, "Operand is not a string", t0, Operand(zero_reg));
+ push(object);
+ lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
+ Assert(lo, "Operand is not a string", object, Operand(FIRST_NONSTRING_TYPE));
+ pop(object);
+}
+
+
void MacroAssembler::AbortIfNotRootValue(Register src,
Heap::RootListIndex root_value_index,
const char* message) {
void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
int frame_alignment = ActivationFrameAlignment();
- // Reserve space for Isolate address which is always passed as last parameter
- num_arguments += 1;
-
// Up to four simple arguments are passed in registers a0..a3.
// Those four arguments must have reserved argument slots on the stack for
// mips, even though those argument slots are not normally used.
void MacroAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
- CallCFunctionHelper(no_reg, function, at, num_arguments);
+ CallCFunctionHelper(no_reg, function, t8, num_arguments);
}
ExternalReference function_reference,
Register scratch,
int num_arguments) {
- // Push Isolate address as the last argument.
- if (num_arguments < kRegisterPassedArguments) {
- Register arg_to_reg[] = {a0, a1, a2, a3};
- Register r = arg_to_reg[num_arguments];
- li(r, Operand(ExternalReference::isolate_address()));
- } else {
- int stack_passed_arguments = num_arguments - kRegisterPassedArguments +
- (StandardFrameConstants::kCArgsSlotsSize /
- kPointerSize);
- // Push Isolate address on the stack after the arguments.
- li(scratch, Operand(ExternalReference::isolate_address()));
- sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
- }
- num_arguments += 1;
-
// Make sure that the stack is aligned before calling a C function unless
// running in the simulator. The simulator has its own alignment check which
// provides more information.
// Just call directly. The function called cannot cause a GC, or
// allow preemption, so the return address in the link register
// stays correct.
- if (!function.is(t9)) {
- mov(t9, function);
- function = t9;
- }
if (function.is(no_reg)) {
- li(t9, Operand(function_reference));
+ function = t9;
+ li(function, Operand(function_reference));
+ } else if (!function.is(t9)) {
+ mov(t9, function);
function = t9;
}
#undef BRANCH_ARGS_CHECK
-#ifdef ENABLE_DEBUGGER_SUPPORT
CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
instructions_(instructions),
size_(instructions * Assembler::kInstrSize),
- masm_(address, size_ + Assembler::kGap) {
+ masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
}
-void CodePatcher::Emit(Instr x) {
- masm()->emit(x);
+void CodePatcher::Emit(Instr instr) {
+ masm()->emit(instr);
}
}
-#endif // ENABLE_DEBUGGER_SUPPORT
+void CodePatcher::ChangeBranchCondition(Condition cond) {
+ Instr instr = Assembler::instr_at(masm_.pc_);
+ ASSERT(Assembler::IsBranch(instr));
+ uint32_t opcode = Assembler::GetOpcodeField(instr);
+ // Currently only the 'eq' and 'ne' cond values are supported and the simple
+ // branch instructions (with opcode being the branch type).
+ // There are some special cases (see Assembler::IsBranch()) so extending this
+ // would be tricky.
+ ASSERT(opcode == BEQ ||
+ opcode == BNE ||
+ opcode == BLEZ ||
+ opcode == BGTZ ||
+ opcode == BEQL ||
+ opcode == BNEL ||
+ opcode == BLEZL ||
+ opcode == BGTZL);
+ opcode = (cond == eq) ? BEQ : BNE;
+ instr = (instr & ~kOpcodeMask) | opcode;
+ masm_.emit(instr);
+}
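// A sketch of the patch performed above (not part of this patch itself):
// MIPS I-type branches are encoded as [opcode:6][rs:5][rt:5][offset:16], so
// flipping between beq and bne only rewrites the opcode field (BEQ = 0x04,
// BNE = 0x05):
//
//   uint32_t FlipBranch(uint32_t instr, bool want_eq) {
//     return (instr & 0x03FFFFFFu) | ((want_eq ? 0x04u : 0x05u) << 26);
//   }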
} } // namespace v8::internal
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// Registers aliases
// cp is assumed to be a callee saved register.
const Register roots = s6; // Roots array pointer.
-const Register cp = s7; // JavaScript context pointer
-const Register fp = s8_fp; // Alias fp
-// Register used for condition evaluation.
+const Register cp = s7; // JavaScript context pointer.
+const Register fp = s8_fp; // Alias for fp.
+// Registers used for condition evaluation.
const Register condReg1 = s4;
const Register condReg2 = s5;
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
- MacroAssembler(void* buffer, int size);
+ // The isolate parameter can be NULL if the macro assembler should
+ // not use isolate-dependent functionality. In this case, it's the
+ // responsibility of the caller never to invoke such a function on the
+ // macro assembler.
+ MacroAssembler(Isolate* isolate, void* buffer, int size);
-// Arguments macros
+// Arguments macros.
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
#define COND_ARGS cond, r1, r2
-// ** Prototypes
+// Prototypes.
-// * Prototypes for functions with no target (eg Ret()).
+// Prototypes for functions with no target (e.g. Ret()).
#define DECLARE_NOTARGET_PROTOTYPE(Name) \
void Name(BranchDelaySlot bd = PROTECT); \
void Name(COND_TYPED_ARGS, BranchDelaySlot bd = PROTECT); \
Name(COND_ARGS, bd); \
}
-// * Prototypes for functions with a target.
+// Prototypes for functions with a target.
// Cases when relocation may be needed.
#define DECLARE_RELOC_PROTOTYPE(Name, target_type) \
Name(target, COND_ARGS, bd); \
}
-// ** Target prototypes.
+// Target prototypes.
#define DECLARE_JUMP_CALL_PROTOTYPES(Name) \
DECLARE_NORELOC_PROTOTYPE(Name, Register) \
#undef DECLARE_JUMP_CALL_PROTOTYPES
#undef DECLARE_BRANCH_PROTOTYPES
+ void CallWithAstId(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ unsigned ast_id,
+ Condition cond = al,
+ Register r1 = zero_reg,
+ const Operand& r2 = Operand(zero_reg));
+
+ int CallSize(Register reg);
+ int CallSize(Handle<Code> code, RelocInfo::Mode rmode);
+
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
void Drop(int count,
// ---------------------------------------------------------------------------
- // Inline caching support
+ // Inline caching support.
// Generate code for checking access rights - used for security checks
// on access to global objects across environments. The holder register
// ---------------------------------------------------------------------------
- // Allocation support
+ // Allocation support.
// Allocate an object in new space. The object_size is specified
// either in bytes or in words if the allocation flag SIZE_IN_WORDS
Label* gc_required);
// ---------------------------------------------------------------------------
- // Instruction macros
+ // Instruction macros.
#define DEFINE_INSTRUCTION(instr) \
void instr(Register rd, Register rs, const Operand& rt); \
DEFINE_INSTRUCTION(Or);
DEFINE_INSTRUCTION(Xor);
DEFINE_INSTRUCTION(Nor);
+ DEFINE_INSTRUCTION2(Neg);
DEFINE_INSTRUCTION(Slt);
DEFINE_INSTRUCTION(Sltu);
#undef DEFINE_INSTRUCTION2
- //------------Pseudo-instructions-------------
+ // ---------------------------------------------------------------------------
+ // Pseudo-instructions.
void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
-
- // load int32 in the rd register
+ // Load int32 in the rd register.
void li(Register rd, Operand j, bool gen2instr = false);
inline void li(Register rd, int32_t j, bool gen2instr = false) {
li(rd, Operand(j), gen2instr);
li(dst, Operand(value), gen2instr);
}
- // Exception-generating instructions and debugging support
+ // Exception-generating instructions and debugging support.
void stop(const char* msg);
-
// Push multiple registers on the stack.
// Registers are saved in numerical order, with higher numbered registers
- // saved in higher memory addresses
+ // saved in higher memory addresses.
void MultiPush(RegList regs);
void MultiPushReversed(RegList regs);
- void Push(Register src) {
+ // Lower case push() for compatibility with arch-independent code.
+ void push(Register src) {
Addu(sp, sp, Operand(-kPointerSize));
sw(src, MemOperand(sp, 0));
}
- // Push two registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2, Condition cond = al) {
- ASSERT(cond == al); // Do not support conditional versions yet.
+ // Push two registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2) {
Subu(sp, sp, Operand(2 * kPointerSize));
sw(src1, MemOperand(sp, 1 * kPointerSize));
sw(src2, MemOperand(sp, 0 * kPointerSize));
}
- // Push three registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2, Register src3, Condition cond = al) {
- ASSERT(cond == al); // Do not support conditional versions yet.
- Addu(sp, sp, Operand(3 * -kPointerSize));
+ // Push three registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3) {
+ Subu(sp, sp, Operand(3 * kPointerSize));
sw(src1, MemOperand(sp, 2 * kPointerSize));
sw(src2, MemOperand(sp, 1 * kPointerSize));
sw(src3, MemOperand(sp, 0 * kPointerSize));
}
- // Push four registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2,
- Register src3, Register src4, Condition cond = al) {
- ASSERT(cond == al); // Do not support conditional versions yet.
- Addu(sp, sp, Operand(4 * -kPointerSize));
+ // Push four registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3, Register src4) {
+ Subu(sp, sp, Operand(4 * kPointerSize));
sw(src1, MemOperand(sp, 3 * kPointerSize));
sw(src2, MemOperand(sp, 2 * kPointerSize));
sw(src3, MemOperand(sp, 1 * kPointerSize));
sw(src4, MemOperand(sp, 0 * kPointerSize));
}
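// Resulting stack layout for the multi-register forms above (sketch, not
// part of this patch):
//   Push(a1, a2, a3);
//   // sp + 8: a1  (leftmost, highest address)
//   // sp + 4: a2
//   // sp + 0: a3  (rightmost, top of stack)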
- inline void push(Register src) { Push(src); }
- inline void pop(Register src) { Pop(src); }
-
void Push(Register src, Condition cond, Register tst1, Register tst2) {
- // Since we don't have conditionnal execution we use a Branch.
+ // Since we don't have conditional execution we use a Branch.
Branch(3, cond, tst1, Operand(tst2));
- Addu(sp, sp, Operand(-kPointerSize));
+ Subu(sp, sp, Operand(kPointerSize));
sw(src, MemOperand(sp, 0));
}
-
// Pops multiple values from the stack and load them in the
// registers specified in regs. Pop order is the opposite as in MultiPush.
void MultiPop(RegList regs);
void MultiPopReversed(RegList regs);
- void Pop(Register dst) {
+
+ // Lower case pop() for compatibility with arch-independent code.
+ void pop(Register dst) {
lw(dst, MemOperand(sp, 0));
Addu(sp, sp, Operand(kPointerSize));
}
+
+ // Pop two registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2) {
+ ASSERT(!src1.is(src2));
+ lw(src2, MemOperand(sp, 0 * kPointerSize));
+ lw(src1, MemOperand(sp, 1 * kPointerSize));
+ Addu(sp, sp, 2 * kPointerSize);
+ }
+
void Pop(uint32_t count = 1) {
Addu(sp, sp, Operand(count * kPointerSize));
}
FPURegister double_scratch,
Label *not_int32);
+ // Helper for EmitECMATruncate.
+ // This will truncate a floating-point value outside of the signed 32-bit
+ // integer range to a 32-bit signed integer.
+ // Expects the double value loaded in input_high and input_low.
+ // Exits with the answer in 'result'.
+ // Note that this code does not work for values in the 32-bit range!
+ void EmitOutOfInt32RangeTruncate(Register result,
+ Register input_high,
+ Register input_low,
+ Register scratch);
+
// -------------------------------------------------------------------------
- // Activation frames
+ // Activation frames.
void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
Register scratch);
// -------------------------------------------------------------------------
- // JavaScript invokes
+ // JavaScript invokes.
// Invoke the JavaScript function code by either calling or jumping.
void InvokeCode(Register code,
#ifdef ENABLE_DEBUGGER_SUPPORT
// -------------------------------------------------------------------------
- // Debugger Support
+ // Debugger Support.
void DebugBreak();
#endif
// -------------------------------------------------------------------------
- // Exception handling
+ // Exception handling.
// Push a new try handler and link into try handler chain.
// The return address must be passed in register ra.
// Copies a fixed number of fields of heap objects from src to dst.
void CopyFields(Register dst, Register src, RegList temps, int field_count);
+ // Copies a number of bytes from src to dst. All registers are clobbered. On
+ // exit src and dst will point to the place just after where the last byte was
+ // read or written and length will be zero.
+ void CopyBytes(Register src,
+ Register dst,
+ Register length,
+ Register scratch);
+
// -------------------------------------------------------------------------
// Support functions.
// Check if the map of an object is equal to a specified map (either
// given directly or as an index into the root list) and branch to
// label if not. Skip the smi check if not required (object is known
- // to be a heap object)
+ // to be a heap object).
void CheckMap(Register obj,
Register scratch,
Handle<Map> map,
// index - holds the overwritten index on exit.
void IndexFromHash(Register hash, Register index);
+ // Get the number of least significant bits from a register.
+ void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
+ void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
+
// Load the value of a number object into a FPU double register. If the
// object is not a number a jump to the label not_number is performed
// and the FPU double register is unchanged.
Register scratch1);
// -------------------------------------------------------------------------
- // Runtime calls
+ // Overflow handling functions.
+ // Usage: first call the appropriate arithmetic function, then call one of the
+ // jump functions with the overflow_dst register as the second parameter.
+
+ void AdduAndCheckForOverflow(Register dst,
+ Register left,
+ Register right,
+ Register overflow_dst,
+ Register scratch = at);
+
+ void SubuAndCheckForOverflow(Register dst,
+ Register left,
+ Register right,
+ Register overflow_dst,
+ Register scratch = at);
+
+ void BranchOnOverflow(Label* label,
+ Register overflow_check,
+ BranchDelaySlot bd = PROTECT) {
+ Branch(label, lt, overflow_check, Operand(zero_reg), bd);
+ }
+
+ void BranchOnNoOverflow(Label* label,
+ Register overflow_check,
+ BranchDelaySlot bd = PROTECT) {
+ Branch(label, ge, overflow_check, Operand(zero_reg), bd);
+ }
+
+ void RetOnOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
+ Ret(lt, overflow_check, Operand(zero_reg), bd);
+ }
+
+ void RetOnNoOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
+ Ret(ge, overflow_check, Operand(zero_reg), bd);
+ }
+
+ // -------------------------------------------------------------------------
+ // Runtime calls.
// Call a code stub.
void CallStub(CodeStub* stub, Condition cond = cc_always,
void CallCFunction(ExternalReference function, int num_arguments);
void CallCFunction(Register function, Register scratch, int num_arguments);
+ void GetCFunctionDoubleResult(const DoubleRegister dst);
+
// Jump to the builtin routine.
void JumpToExternalReference(const ExternalReference& builtin);
struct Unresolved {
int pc;
- uint32_t flags; // see Bootstrapper::FixupFlags decoders/encoders.
+ uint32_t flags; // See Bootstrapper::FixupFlags decoders/encoders.
const char* name;
};
- Handle<Object> CodeObject() { return code_object_; }
+ Handle<Object> CodeObject() {
+ ASSERT(!code_object_.is_null());
+ return code_object_;
+ }
// -------------------------------------------------------------------------
- // StatsCounter support
+ // StatsCounter support.
void SetCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2);
// -------------------------------------------------------------------------
- // Debugging
+ // Debugging.
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
bool allow_stub_calls() { return allow_stub_calls_; }
// ---------------------------------------------------------------------------
- // Number utilities
+ // Number utilities.
// Check whether the value of reg is a power of two and not zero. If not
// control continues at the label not_power_of_two. If reg is a power of two
Label* not_power_of_two_or_zero);
// -------------------------------------------------------------------------
- // Smi utilities
+ // Smi utilities.
// Try to convert int32 to smi. If the value is too large, preserve
// the original value and jump to not_a_smi. Destroys scratch and
void AbortIfSmi(Register object);
void AbortIfNotSmi(Register object);
+ // Abort execution if argument is not a string. Used in debug code.
+ void AbortIfNotString(Register object);
+
// Abort execution if argument is not the root value with the given index.
void AbortIfNotRootValue(Register src,
Heap::RootListIndex root_value_index,
const char* message);
// ---------------------------------------------------------------------------
- // HeapNumber utilities
+ // HeapNumber utilities.
void JumpIfNotHeapNumber(Register object,
Register heap_number_map,
Label* on_not_heap_number);
// -------------------------------------------------------------------------
- // String utilities
+ // String utilities.
// Checks if both instance types are sequential ASCII strings and jumps to
// label if either is not.
};
-#ifdef ENABLE_DEBUGGER_SUPPORT
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
MacroAssembler* masm() { return &masm_; }
// Emit an instruction directly.
- void Emit(Instr x);
+ void Emit(Instr instr);
// Emit an address directly.
void Emit(Address addr);
+ // Change the condition part of the current instruction, leaving the rest of
+ // the instruction unchanged.
+ void ChangeBranchCondition(Condition cond);
+
private:
byte* address_; // The address of the code being patched.
int instructions_; // Number of instructions of the expected patch size.
int size_; // Number of bytes of the expected patch size.
MacroAssembler masm_; // Macro assembler used to generate the code.
};
-#endif // ENABLE_DEBUGGER_SUPPORT
// -----------------------------------------------------------------------------
}
+// Generate a MemOperand for storing arguments 5..N on the stack
+// when calling CallCFunction().
+static inline MemOperand CFunctionArgumentOperand(int index) {
+ ASSERT(index > StandardFrameConstants::kCArgSlotCount);
+ // Argument 5 takes the slot just past the four Arg-slots.
+ int offset =
+ (index - 5) * kPointerSize + StandardFrameConstants::kCArgsSlotsSize;
+ return MemOperand(sp, offset);
+}
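+
+// For instance (a hypothetical call site): storing the sixth C argument
+// before a CallCFunction() would use
+//   __ sw(t1, CFunctionArgumentOperand(6));
+// which resolves to MemOperand(sp, 1 * kPointerSize + kCArgsSlotsSize).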
+
#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(
Mode mode,
int registers_to_save)
- : masm_(new MacroAssembler(NULL, kRegExpCodeSize)),
+ : masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
+++ /dev/null
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_REGISTER_ALLOCATOR_MIPS_INL_H_
-#define V8_IA32_REGISTER_ALLOCATOR_MIPS_INL_H_
-
-#include "v8.h"
-#include "mips/assembler-mips.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-bool RegisterAllocator::IsReserved(Register reg) {
- // The code for this test relies on the order of register codes.
- return reg.is(cp) || reg.is(s8_fp) || reg.is(sp);
-}
-
-
-int RegisterAllocator::ToNumber(Register reg) {
- ASSERT(reg.is_valid() && !IsReserved(reg));
- const int kNumbers[] = {
- 0, // zero_reg
- 1, // at
- 2, // v0
- 3, // v1
- 4, // a0
- 5, // a1
- 6, // a2
- 7, // a3
- 8, // t0
- 9, // t1
- 10, // t2
- 11, // t3
- 12, // t4
- 13, // t5
- 14, // t6
- 15, // t7
- 16, // t8
- 17, // t9
- 18, // s0
- 19, // s1
- 20, // s2
- 21, // s3
- 22, // s4
- 23, // s5
- 24, // s6
- 25, // s7
- 26, // k0
- 27, // k1
- 28, // gp
- 29, // sp
- 30, // s8_fp
- 31, // ra
- };
- return kNumbers[reg.code()];
-}
-
-
-Register RegisterAllocator::ToRegister(int num) {
- ASSERT(num >= 0 && num < kNumRegisters);
- const Register kRegisters[] = {
- zero_reg,
- at,
- v0,
- v1,
- a0,
- a1,
- a2,
- a3,
- t0,
- t1,
- t2,
- t3,
- t4,
- t5,
- t6,
- t7,
- s0,
- s1,
- s2,
- s3,
- s4,
- s5,
- s6,
- s7,
- t8,
- t9,
- k0,
- k1,
- gp,
- sp,
- s8_fp,
- ra
- };
- return kRegisters[num];
-}
-
-
-void RegisterAllocator::Initialize() {
- Reset();
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_REGISTER_ALLOCATOR_MIPS_INL_H_
-
+++ /dev/null
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Result implementation.
-
-void Result::ToRegister() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void Result::ToRegister(Register target) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
- // No byte registers on MIPS.
- UNREACHABLE();
- return Result();
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
+++ /dev/null
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
-#define V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
-
-#include "mips/constants-mips.h"
-
-namespace v8 {
-namespace internal {
-
-class RegisterAllocatorConstants : public AllStatic {
- public:
- // No registers are currently managed by the register allocator on MIPS.
- static const int kNumRegisters = 0;
- static const int kInvalidRegister = -1;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
-
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
#include "disasm.h"
#include "assembler.h"
-#include "globals.h" // Need the BitCast
+#include "globals.h" // Need the BitCast.
#include "mips/constants-mips.h"
#include "mips/simulator-mips.h"
namespace v8 {
namespace internal {
-// Utils functions
+// Utility functions.
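+// (HaveSameSign below works because a ^ b has its sign bit clear exactly
+// when a and b agree in sign, so comparing the XOR against zero tests sign
+// equality directly.)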
bool HaveSameSign(int32_t a, int32_t b) {
return ((a ^ b) >= 0);
}
}
-#else // ndef GENERATED_CODE_COVERAGE
+#else // GENERATED_CODE_COVERAGE
#define UNSUPPORTED() printf("Unsupported instruction.\n");
#define REG_INFO(n) Registers::Name(n), GetRegisterValue(n), GetRegisterValue(n)
PrintF("\n");
- // at, v0, a0
+ // at, v0, a0.
PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
REG_INFO(1), REG_INFO(2), REG_INFO(4));
- // v1, a1
+ // v1, a1.
PrintF("%26s\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
"", REG_INFO(3), REG_INFO(5));
- // a2
+ // a2.
PrintF("%26s\t%26s\t%3s: 0x%08x %10d\n", "", "", REG_INFO(6));
- // a3
+ // a3.
PrintF("%26s\t%26s\t%3s: 0x%08x %10d\n", "", "", REG_INFO(7));
PrintF("\n");
// t0-t7, s0-s7
REG_INFO(8+i), REG_INFO(16+i));
}
PrintF("\n");
- // t8, k0, LO
+ // t8, k0, LO.
PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
REG_INFO(24), REG_INFO(26), REG_INFO(32));
- // t9, k1, HI
+ // t9, k1, HI.
PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
REG_INFO(25), REG_INFO(27), REG_INFO(33));
- // sp, fp, gp
+ // sp, fp, gp.
PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
REG_INFO(29), REG_INFO(30), REG_INFO(28));
- // pc
+ // pc.
PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
REG_INFO(31), REG_INFO(34));
PrintAllRegs();
PrintF("\n\n");
- // f0, f1, f2, ... f31
+ // f0, f1, f2, ... f31.
PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(0) );
PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(2) );
PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(4) );
char arg2[ARG_SIZE + 1];
char* argv[3] = { cmd, arg1, arg2 };
- // make sure to have a proper terminating character if reaching the limit
+ // Make sure to have a proper terminating character if reaching the limit.
cmd[COMMAND_SIZE] = 0;
arg1[ARG_SIZE] = 0;
arg2[ARG_SIZE] = 0;
if (last_pc != sim_->get_pc()) {
disasm::NameConverter converter;
disasm::Disassembler dasm(converter);
- // use a reasonably large buffer
+ // Use a reasonably large buffer.
v8::internal::EmbeddedVector<char, 256> buffer;
dasm.InstructionDecode(buffer,
- reinterpret_cast<byte_*>(sim_->get_pc()));
+ reinterpret_cast<byte*>(sim_->get_pc()));
PrintF(" 0x%08x %s\n", sim_->get_pc(), buffer.start());
last_pc = sim_->get_pc();
}
if (strcmp(cmd, "stack") == 0) {
cur = reinterpret_cast<int32_t*>(sim_->get_register(Simulator::sp));
- } else { // "mem"
+ } else { // Command "mem".
int32_t value;
if (!GetValue(arg1, &value)) {
PrintF("%s unrecognized\n", arg1);
} else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0)) {
disasm::NameConverter converter;
disasm::Disassembler dasm(converter);
- // use a reasonably large buffer
+ // Use a reasonably large buffer.
v8::internal::EmbeddedVector<char, 256> buffer;
- byte_* cur = NULL;
- byte_* end = NULL;
+ byte* cur = NULL;
+ byte* end = NULL;
if (argc == 1) {
- cur = reinterpret_cast<byte_*>(sim_->get_pc());
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
end = cur + (10 * Instruction::kInstrSize);
} else if (argc == 2) {
int32_t value;
if (GetValue(arg1, &value)) {
- cur = reinterpret_cast<byte_*>(value);
- // no length parameter passed, assume 10 instructions
+ cur = reinterpret_cast<byte*>(value);
+ // No length parameter passed, assume 10 instructions.
end = cur + (10 * Instruction::kInstrSize);
}
} else {
int32_t value1;
int32_t value2;
if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
- cur = reinterpret_cast<byte_*>(value1);
+ cur = reinterpret_cast<byte*>(value1);
end = cur + (value2 * Instruction::kInstrSize);
}
}
} else if (strcmp(cmd, "unstop") == 0) {
PrintF("Unstop command not implemented on MIPS.");
} else if ((strcmp(cmd, "stat") == 0) || (strcmp(cmd, "st") == 0)) {
- // Print registers and disassemble
+ // Print registers and disassemble.
PrintAllRegs();
PrintF("\n");
disasm::NameConverter converter;
disasm::Disassembler dasm(converter);
- // use a reasonably large buffer
+ // Use a reasonably large buffer.
v8::internal::EmbeddedVector<char, 256> buffer;
- byte_* cur = NULL;
- byte_* end = NULL;
+ byte* cur = NULL;
+ byte* end = NULL;
if (argc == 1) {
- cur = reinterpret_cast<byte_*>(sim_->get_pc());
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
end = cur + (10 * Instruction::kInstrSize);
} else if (argc == 2) {
int32_t value;
if (GetValue(arg1, &value)) {
- cur = reinterpret_cast<byte_*>(value);
+ cur = reinterpret_cast<byte*>(value);
- // no length parameter passed, assume 10 instructions
+ // No length parameter passed, assume 10 instructions.
end = cur + (10 * Instruction::kInstrSize);
}
int32_t value1;
int32_t value2;
if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
- cur = reinterpret_cast<byte_*>(value1);
+ cur = reinterpret_cast<byte*>(value1);
end = cur + (value2 * Instruction::kInstrSize);
}
}
CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
v8::internal::HashMap::Entry* entry = i_cache->Lookup(page,
- ICacheHash(page),
- true);
+ ICacheHash(page),
+ true);
if (entry->value == NULL) {
CachePage* new_page = new CachePage();
entry->value = new_page;
Initialize(isolate);
// Setup simulator support first. Some of this information is needed to
// setup the architecture state.
- stack_size_ = 1 * 1024*1024; // allocate 1MB for stack
stack_ = reinterpret_cast<char*>(malloc(stack_size_));
pc_modified_ = false;
icount_ = 0;
pc_modified_ = true;
}
- // zero register always hold 0.
+ // Zero register always holds 0.
registers_[reg] = (reg == 0) ? 0 : value;
}
int Simulator::ReadW(int32_t addr, Instruction* instr) {
- if (addr >=0 && addr < 0x400) {
+ if (addr >= 0 && addr < 0x400) {
- // this has to be a NULL-dereference
+ // This has to be a NULL-dereference, drop into debugger.
MipsDebugger dbg(this);
dbg.Debug();
}
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
return *ptr;
}
- PrintF("Unaligned read at 0x%08x, pc=%p\n", addr,
- reinterpret_cast<void*>(instr));
+ PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
MipsDebugger dbg(this);
dbg.Debug();
return 0;
void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
if (addr >= 0 && addr < 0x400) {
- // this has to be a NULL-dereference
+ // This has to be a NULL-dereference, drop into debugger.
MipsDebugger dbg(this);
dbg.Debug();
}
*ptr = value;
return;
}
- PrintF("Unaligned write at 0x%08x, pc=%p\n", addr,
- reinterpret_cast<void*>(instr));
+ PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
MipsDebugger dbg(this);
dbg.Debug();
}
double* ptr = reinterpret_cast<double*>(addr);
return *ptr;
}
- PrintF("Unaligned (double) read at 0x%08x, pc=%p\n", addr,
- reinterpret_cast<void*>(instr));
+ PrintF("Unaligned (double) read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
OS::Abort();
return 0;
}
*ptr = value;
return;
}
- PrintF("Unaligned (double) write at 0x%08x, pc=%p\n", addr,
- reinterpret_cast<void*>(instr));
+ PrintF("Unaligned (double) write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
OS::Abort();
}
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
return *ptr;
}
- PrintF("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr,
- reinterpret_cast<void*>(instr));
+ PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
OS::Abort();
return 0;
}
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
return *ptr;
}
- PrintF("Unaligned signed halfword read at 0x%08x, pc=%p\n", addr,
- reinterpret_cast<void*>(instr));
+ PrintF("Unaligned signed halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
OS::Abort();
return 0;
}
*ptr = value;
return;
}
- PrintF("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr,
- reinterpret_cast<void*>(instr));
+ PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
OS::Abort();
}
*ptr = value;
return;
}
- PrintF("Unaligned halfword write at 0x%08x, pc=%p\n", addr,
- reinterpret_cast<void*>(instr));
+ PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
OS::Abort();
}
int32_t arg2,
int32_t arg3);
+// This signature supports a direct call to an API function's native callback
+// (refer to InvocationCallback in v8.h).
+typedef v8::Handle<v8::Value> (*SimulatorRuntimeApiCall)(int32_t arg0);
+
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime. They are also used for debugging with simulator.
void Simulator::SoftwareInterrupt(Instruction* instr) {
// We first check if we met a call_rt_redirected.
if (instr->InstructionBits() == rtCallRedirInstr) {
- // Check if stack is aligned. Error if not aligned is reported below to
- // include information on the function called.
- bool stack_aligned =
- (get_register(sp)
- & (::v8::internal::FLAG_sim_stack_alignment - 1)) == 0;
Redirection* redirection = Redirection::FromSwiInstruction(instr);
int32_t arg0 = get_register(a0);
int32_t arg1 = get_register(a1);
// stack check here.
int32_t* stack_pointer = reinterpret_cast<int32_t*>(get_register(sp));
int32_t* stack = reinterpret_cast<int32_t*>(stack_);
- if (stack_pointer >= stack && stack_pointer < stack + stack_size_) {
- arg4 = stack_pointer[0];
- arg5 = stack_pointer[1];
+ if (stack_pointer >= stack && stack_pointer < stack + stack_size_ - 5) {
+ // Args 4 and 5 are on the stack after the reserved space for args 0..3.
+ arg4 = stack_pointer[4];
+ arg5 = stack_pointer[5];
}
+
+ bool fp_call =
+ (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
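+ // (Roughly, these redirection types correspond to C signatures taking
+ // double arguments, e.g. double f(double, double) for BUILTIN_FP_FP_CALL;
+ // the double bits arrive here in GP register pairs and are reinterpreted
+ // on the callee side.)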
+
// This is dodgy but it works because the C entry stubs are never moved.
// See comment in codegen-arm.cc and bug 1242173.
int32_t saved_ra = get_register(ra);
intptr_t external =
- reinterpret_cast<int32_t>(redirection->external_function());
+ reinterpret_cast<intptr_t>(redirection->external_function());
// Based on CpuFeatures::IsSupported(FPU), Mips will use either hardware
// FPU, or gcc soft-float routines. Hardware FPU is simulated in this
// than the usual int64_t return. The data is returned in different
// registers and cannot be cast from one type to the other. However, the
// calling arguments are passed the same way in both cases.
- if (redirection->type() == ExternalReference::FP_RETURN_CALL) {
+ if (fp_call) {
SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
- if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
- PrintF("Call to host function at %p with args %08x:%08x %08x:%08x",
- FUNCTION_ADDR(target), arg0, arg1, arg2, arg3);
- if (!stack_aligned) {
- PrintF(" with unaligned stack %08x\n", get_register(sp));
- }
- PrintF("\n");
- }
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF(
+ "Call to host function at %p args %08x, %08x, %08x, %08x\n",
+ FUNCTION_ADDR(target),
+ arg0,
+ arg1,
+ arg2,
+ arg3);
+ }
double result = target(arg0, arg1, arg2, arg3);
// fp result -> registers v0 and v1.
int32_t gpreg_pair[2];
set_register(v0, gpreg_pair[0]);
set_register(v1, gpreg_pair[1]);
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
- PrintF("Mips does not yet support ExternalReference::DIRECT_API_CALL\n");
- ASSERT(redirection->type() != ExternalReference::DIRECT_API_CALL);
- } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
- PrintF("Mips does not support ExternalReference::DIRECT_GETTER_CALL\n");
- ASSERT(redirection->type() != ExternalReference::DIRECT_GETTER_CALL);
+ SimulatorRuntimeApiCall target =
+ reinterpret_cast<SimulatorRuntimeApiCall>(external);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08x\n",
+ FUNCTION_ADDR(target),
+ arg0);
+ }
+ v8::Handle<v8::Value> result = target(arg0);
+ set_register(v0, (int32_t) *result);
} else {
- // Builtin call.
- ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
- if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+ if (::v8::internal::FLAG_trace_sim) {
PrintF(
- "Call to host function at %p: %08x, %08x, %08x, %08x, %08x, %08x",
+ "Call to host function at %p "
+ "args %08x, %08x, %08x, %08x, %08x, %08x\n",
FUNCTION_ADDR(target),
arg0,
arg1,
arg3,
arg4,
arg5);
- if (!stack_aligned) {
- PrintF(" with unaligned stack %08x\n", get_register(sp));
- }
- PrintF("\n");
}
-
int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
set_register(v0, static_cast<int32_t>(result));
set_register(v1, static_cast<int32_t>(result >> 32));
set_register(ra, saved_ra);
set_pc(get_register(ra));
- } else if (func == BREAK && code >= 0 && code < 16) {
- // First 16 break_ codes interpreted as debug markers.
+ } else if (func == BREAK && code >= 0 && code < 32) {
+ // First 32 break_ codes interpreted as debug-markers/watchpoints.
MipsDebugger dbg(this);
++break_count_;
PrintF("\n---- break %d marker: %3d (instr count: %8d) ----------"
const int32_t fs_reg = instr->FsValue();
- // ---------- Configuration
+ // ---------- Configuration.
switch (op) {
- case COP1: // Coprocessor instructions
+ case COP1: // Coprocessor instructions.
switch (instr->RsFieldRaw()) {
case BC1: // Handled in DecodeTypeImmed, should never come here.
UNREACHABLE();
} else {
// Logical right-rotate of a word by a fixed number of bits. This
- // is special case of SRL instruction, added in MIPS32 Release 2.
+ // is a special case of the SRL instruction, added in MIPS32 Release 2.
- // RS field is equal to 00001
+ // RS field is equal to 00001.
alu_out = (rt_u >> sa) | (rt_u << (32 - sa));
}
break;
} else {
// Logical right-rotate of a word by a variable number of bits.
- // This is special case od SRLV instruction, added in MIPS32
+ // This is a special case of the SRLV instruction, added in MIPS32
- // Release 2. SA field is equal to 00001
+ // Release 2. SA field is equal to 00001.
alu_out = (rt_u >> rs_u) | (rt_u << (32 - rs_u));
}
break;
case MULTU:
u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
break;
- case DIV:
- case DIVU:
- exceptions[kDivideByZero] = rt == 0;
- break;
case ADD:
if (HaveSameSign(rs, rt)) {
if (rs > 0) {
case SLTU:
alu_out = rs_u < rt_u ? 1 : 0;
break;
- // Break and trap instructions
+ // Break and trap instructions.
case BREAK:
do_interrupt = true;
case MOVCI:
// No action taken on decode.
break;
+ case DIV:
+ case DIVU:
+ // div and divu never raise exceptions.
+ break;
default:
UNREACHABLE();
};
case SPECIAL3:
switch (instr->FunctionFieldRaw()) {
case INS: { // Mips32r2 instruction.
- // Interpret Rd field as 5-bit msb of insert.
+ // Interpret rd field as 5-bit msb of insert.
uint16_t msb = rd_reg;
// Interpret sa field as 5-bit lsb of insert.
uint16_t lsb = sa;
break;
}
case EXT: { // Mips32r2 instruction.
- // Interpret Rd field as 5-bit msb of extract.
+ // Interpret rd field as 5-bit msb of extract.
uint16_t msb = rd_reg;
// Interpret sa field as 5-bit lsb of extract.
uint16_t lsb = sa;
int64_t i64hilo = 0;
uint64_t u64hilo = 0;
- // ALU output
+ // ALU output.
// It should not be used as is. Instructions using it should always
// initialize it first.
int32_t alu_out = 0x12345678;
// For break and trap instructions.
bool do_interrupt = false;
- // For jr and jalr
+ // For jr and jalr.
// Get current pc.
int32_t current_pc = get_pc();
- // Next pc
+ // Next pc.
// ---------- Raise exceptions triggered.
SignalExceptions();
- // ---------- Execution
+ // ---------- Execution.
switch (op) {
case COP1:
switch (instr->RsFieldRaw()) {
- case BC1: // branch on coprocessor condition
+ case BC1: // Branch on coprocessor condition.
UNREACHABLE();
break;
case CFC1:
Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
current_pc+Instruction::kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
- set_register(31, current_pc + 2* Instruction::kInstrSize);
+ set_register(31, current_pc + 2 * Instruction::kInstrSize);
set_pc(next_pc);
pc_modified_ = true;
break;
set_register(HI, static_cast<int32_t>(u64hilo >> 32));
break;
case DIV:
- // Divide by zero was checked in the configuration step.
- set_register(LO, rs / rt);
- set_register(HI, rs % rt);
+ // Divide by zero was not checked in the configuration step - div and
+ // divu do not raise exceptions. On division by 0, the result will
+ // be UNPREDICTABLE.
+ if (rt != 0) {
+ set_register(LO, rs / rt);
+ set_register(HI, rs % rt);
+ }
break;
case DIVU:
- set_register(LO, rs_u / rt_u);
- set_register(HI, rs_u % rt_u);
+ if (rt_u != 0) {
+ set_register(LO, rs_u / rt_u);
+ set_register(HI, rs_u % rt_u);
+ }
break;
// Break and trap instructions.
case BREAK:
if (rt) set_register(rd_reg, rs);
break;
case MOVCI: {
- uint32_t cc = instr->FCccValue();
+ uint32_t cc = instr->FBccValue();
uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
- if (instr->Bit(16)) { // Read Tf bit
+ if (instr->Bit(16)) { // Read Tf bit.
if (test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs);
} else {
if (!test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs);
}
-// Type 2: instructions using a 16 bytes immediate. (eg: addi, beq)
+// Type 2: instructions using a 16-bit immediate (e.g. addi, beq).
void Simulator::DecodeTypeImmediate(Instruction* instr) {
// Instruction fields.
Opcode op = instr->OpcodeFieldRaw();
int32_t rs = get_register(instr->RsValue());
uint32_t rs_u = static_cast<uint32_t>(rs);
- int32_t rt_reg = instr->RtValue(); // destination register
+ int32_t rt_reg = instr->RtValue(); // Destination register.
int32_t rt = get_register(rt_reg);
int16_t imm16 = instr->Imm16Value();
- int32_t ft_reg = instr->FtValue(); // destination register
+ int32_t ft_reg = instr->FtValue(); // Destination register.
// Zero extended immediate.
uint32_t oe_imm16 = 0xffff & imm16;
// Used for memory instructions.
int32_t addr = 0x0;
- // Value to be written in memory
+ // Value to be written in memory.
uint32_t mem_value = 0x0;
- // ---------- Configuration (and execution for REGIMM)
+ // ---------- Configuration (and execution for REGIMM).
switch (op) {
// ------------- COP1. Coprocessor instructions.
case COP1:
cc_value = test_fcsr_bit(fcsr_cc);
do_branch = (instr->FBtrueValue()) ? cc_value : !cc_value;
execute_branch_delay_instruction = true;
- // Set next_pc
+ // Set next_pc.
if (do_branch) {
next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
} else {
UNREACHABLE();
};
break;
- // ------------- REGIMM class
+ // ------------- REGIMM class.
case REGIMM:
switch (instr->RtFieldRaw()) {
case BLTZ:
case BGEZAL:
// Branch instructions common part.
execute_branch_delay_instruction = true;
- // Set next_pc
+ // Set next_pc.
if (do_branch) {
next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
if (instr->IsLinkingInstruction()) {
default:
break;
};
- break; // case REGIMM
- // ------------- Branch instructions
+ break; // case REGIMM.
+ // ------------- Branch instructions.
// When comparing to zero, the encoding of rt field is always 0, so we don't
// need to replace rt with zero.
case BEQ:
case BGTZ:
do_branch = rs > 0;
break;
- // ------------- Arithmetic instructions
+ // ------------- Arithmetic instructions.
case ADDI:
if (HaveSameSign(rs, se_imm16)) {
if (rs > 0) {
case LUI:
alu_out = (oe_imm16 << 16);
break;
- // ------------- Memory instructions
+ // ------------- Memory instructions.
case LB:
addr = rs + se_imm16;
alu_out = ReadB(addr);
alu_out = ReadH(addr, instr);
break;
case LWL: {
- // al_offset is an offset of the effective address within an aligned word
+ // al_offset is the offset of the effective address in an aligned word.
uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
uint8_t byte_shift = kPointerAlignmentMask - al_offset;
uint32_t mask = (1 << byte_shift * 8) - 1;
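+ // Worked example (illustrative, little-endian): for al_offset == 1,
+ // byte_shift == 2 and mask == 0xffff, so LWL keeps the low two bytes of
+ // rt and fills the upper bytes from the unaligned word in memory.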
alu_out = ReadHU(addr, instr);
break;
case LWR: {
- // al_offset is an offset of the effective address within an aligned word
+ // al_offset is the offset of the effective address in an aligned word.
uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
uint8_t byte_shift = kPointerAlignmentMask - al_offset;
uint32_t mask = al_offset ? (~0 << (byte_shift + 1) * 8) : 0;
// ---------- Raise exceptions triggered.
SignalExceptions();
- // ---------- Execution
+ // ---------- Execution.
switch (op) {
- // ------------- Branch instructions
+ // ------------- Branch instructions.
case BEQ:
case BNE:
case BLEZ:
case BGTZ:
// Branch instructions common part.
execute_branch_delay_instruction = true;
- // Set next_pc
+ // Set next_pc.
if (do_branch) {
next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
if (instr->IsLinkingInstruction()) {
next_pc = current_pc + 2 * Instruction::kInstrSize;
}
break;
- // ------------- Arithmetic instructions
+ // ------------- Arithmetic instructions.
case ADDI:
case ADDIU:
case SLTI:
case LUI:
set_register(rt_reg, alu_out);
break;
- // ------------- Memory instructions
+ // ------------- Memory instructions.
case LB:
case LH:
case LWL:
}
-// Type 3: instructions using a 26 bytes immediate. (eg: j, jal)
+// Type 3: instructions using a 26-bit immediate (e.g. j, jal).
void Simulator::DecodeTypeJump(Instruction* instr) {
// Get current pc.
int32_t current_pc = get_pc();
// Get unchanged bits of pc.
int32_t pc_high_bits = current_pc & 0xf0000000;
- // Next pc
+ // Next pc.
int32_t next_pc = pc_high_bits | (instr->Imm26Value() << 2);
- // Execute branch delay slot
+ // Execute branch delay slot.
// We don't check for end_sim_pc. First it should not be met as the current pc
// is valid. Secondly a jump should always execute its branch delay slot.
Instruction* branch_delay_instr =
- reinterpret_cast<Instruction*>(current_pc+Instruction::kInstrSize);
+ reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
// Update pc and ra if necessary.
// Do this after the branch delay execution.
if (instr->IsLinkingInstruction()) {
- set_register(31, current_pc + 2* Instruction::kInstrSize);
+ set_register(31, current_pc + 2 * Instruction::kInstrSize);
}
set_pc(next_pc);
pc_modified_ = true;
if (::v8::internal::FLAG_trace_sim) {
disasm::NameConverter converter;
disasm::Disassembler dasm(converter);
- // use a reasonably large buffer
+ // Use a reasonably large buffer.
v8::internal::EmbeddedVector<char, 256> buffer;
- dasm.InstructionDecode(buffer, reinterpret_cast<byte_*>(instr));
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
PrintF(" 0x%08x %s\n", reinterpret_cast<intptr_t>(instr),
- buffer.start());
+ buffer.start());
}
switch (instr->InstructionType()) {
}
-int32_t Simulator::Call(byte_* entry, int argument_count, ...) {
+int32_t Simulator::Call(byte* entry, int argument_count, ...) {
va_list parameters;
va_start(parameters, argument_count);
- // Setup arguments
+ // Setup arguments.
// First four arguments passed in registers.
ASSERT(argument_count >= 4);
va_end(parameters);
set_register(sp, entry_stack);
- // Prepare to execute the code at entry
+ // Prepare to execute the code at entry.
set_register(pc, reinterpret_cast<int32_t>(entry));
// Put down marker for end of simulation. The simulator will stop simulation
// when the PC reaches this value. By saving the "end simulation" value into
set_register(gp, callee_saved_value);
set_register(fp, callee_saved_value);
- // Start the simulation
+ // Start the simulation.
Execute();
// Check that the callee-saved registers have been preserved.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
entry(p0, p1, p2, p3, p4)
typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*,
- void*, int*, Address, int, Isolate*);
+ int*, Address, int, Isolate*);
+
// Call the generated regexp code directly. The code at the entry address
-// should act as a function matching the type arm_regexp_matcher.
-// The fifth argument is a dummy that reserves the space used for
-// the return address added by the ExitFrame in native calls.
+// should act as a function matching the type mips_regexp_matcher.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
- (FUNCTION_CAST<mips_regexp_matcher>(entry)( \
- p0, p1, p2, p3, NULL, p4, p5, p6, p7))
+ (FUNCTION_CAST<mips_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7))
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
// Running with a simulator.
#include "hashmap.h"
+#include "assembler.h"
namespace v8 {
namespace internal {
sp,
s8,
ra,
- // LO, HI, and pc
+ // LO, HI, and pc.
LO,
HI,
pc, // pc must be the last register.
// Generated code will always use doubles. So we will only use even registers.
enum FPURegister {
f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11,
- f12, f13, f14, f15, // f12 and f14 are arguments FPURegisters
+ f12, f13, f14, f15, // f12 and f14 are the argument FPURegisters.
f16, f17, f18, f19, f20, f21, f22, f23, f24, f25,
f26, f27, f28, f29, f30, f31,
kNumFPURegisters
// instruction.
void set_register(int reg, int32_t value);
int32_t get_register(int reg) const;
- // Same for FPURegisters
+ // Same for FPURegisters.
void set_fpu_register(int fpureg, int32_t value);
void set_fpu_register_float(int fpureg, float value);
void set_fpu_register_double(int fpureg, double value);
int size);
static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
-
enum Exception {
none,
kIntegerOverflow,
uint32_t FCSR_;
// Simulator support.
+ // Allocate 1MB for stack.
+ static const size_t stack_size_ = 1 * 1024 * 1024;
char* stack_;
- size_t stack_size_;
bool pc_modified_;
int icount_;
int break_count_;
- // Icache simulation
+ // Icache simulation.
v8::internal::HashMap* i_cache_;
+ v8::internal::Isolate* isolate_;
+
// Registered breakpoints.
Instruction* break_pc_;
Instr break_instr_;
-
- v8::internal::Isolate* isolate_;
};
// When running with the simulator transition into simulated execution at this
// point.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
-reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
+ reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
- Simulator::current(Isolate::Current())->Call( \
- entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
+ Simulator::current(Isolate::Current())->Call( \
+ entry, 8, p0, p1, p2, p3, p4, p5, p6, p7)
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- try_catch_address == NULL ? \
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+ try_catch_address == NULL ? \
NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
#if defined(V8_TARGET_ARCH_MIPS)
#include "ic-inl.h"
-#include "codegen-inl.h"
+#include "codegen.h"
#include "stub-cache.h"
namespace v8 {
+++ /dev/null
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VIRTUAL_FRAME_MIPS_INL_H_
-#define V8_VIRTUAL_FRAME_MIPS_INL_H_
-
-#include "assembler-mips.h"
-#include "virtual-frame-mips.h"
-
-namespace v8 {
-namespace internal {
-
-
-MemOperand VirtualFrame::ParameterAt(int index) {
- UNIMPLEMENTED_MIPS();
- return MemOperand(zero_reg, 0);
-}
-
-
-// The receiver frame slot.
-MemOperand VirtualFrame::Receiver() {
- UNIMPLEMENTED_MIPS();
- return MemOperand(zero_reg, 0);
-}
-
-
-void VirtualFrame::Forget(int count) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_VIRTUAL_FRAME_MIPS_INL_H_
+++ /dev/null
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm())
-
-void VirtualFrame::PopToA1A0() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::PopToA1() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::PopToA0() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::MergeTo(const VirtualFrame* expected,
- Condition cond,
- Register r1,
- const Operand& r2) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::MergeTo(VirtualFrame* expected,
- Condition cond,
- Register r1,
- const Operand& r2) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::MergeTOSTo(
- VirtualFrame::TopOfStack expected_top_of_stack_state,
- Condition cond,
- Register r1,
- const Operand& r2) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::Enter() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::Exit() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::AllocateStackSlots() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-
-void VirtualFrame::PushReceiverSlotAddress() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::PushTryHandler(HandlerType type) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallJSFunction(int arg_count) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallRuntime(const Runtime::Function* f, int arg_count) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void VirtualFrame::DebugBreak() {
- UNIMPLEMENTED_MIPS();
-}
-#endif
-
-
-void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- int arg_count) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallKeyedLoadIC() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallKeyedStoreIC() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode,
- int dropped_args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-// NO_TOS_REGISTERS, A0_TOS, A1_TOS, A1_A0_TOS, A0_A1_TOS.
-const bool VirtualFrame::kA0InUse[TOS_STATES] =
- { false, true, false, true, true };
-const bool VirtualFrame::kA1InUse[TOS_STATES] =
- { false, false, true, true, true };
-const int VirtualFrame::kVirtualElements[TOS_STATES] =
- { 0, 1, 1, 2, 2 };
-const Register VirtualFrame::kTopRegister[TOS_STATES] =
- { a0, a0, a1, a1, a0 };
-const Register VirtualFrame::kBottomRegister[TOS_STATES] =
- { a0, a0, a1, a0, a1 };
-const Register VirtualFrame::kAllocatedRegisters[
- VirtualFrame::kNumberOfAllocatedRegisters] = { a2, a3, t0, t1, t2 };
-// Popping is done by the transition implied by kStateAfterPop. Of course if
-// there were no stack slots allocated to registers then the physical SP must
-// be adjusted.
-const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPop[TOS_STATES] =
- { NO_TOS_REGISTERS, NO_TOS_REGISTERS, NO_TOS_REGISTERS, A0_TOS, A1_TOS };
-// Pushing is done by the transition implied by kStateAfterPush. Of course if
-// the maximum number of registers was already allocated to the top of stack
-// slots then one register must be physically pushed onto the stack.
-const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPush[TOS_STATES] =
- { A0_TOS, A1_A0_TOS, A0_A1_TOS, A0_A1_TOS, A1_A0_TOS };
-
-
-void VirtualFrame::Drop(int count) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::Pop() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitPop(Register reg) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToA0() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToA1() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToA1A0() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-Register VirtualFrame::Peek() {
- UNIMPLEMENTED_MIPS();
- return no_reg;
-}
-
-
-Register VirtualFrame::Peek2() {
- UNIMPLEMENTED_MIPS();
- return no_reg;
-}
-
-
-void VirtualFrame::Dup() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::Dup2() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-Register VirtualFrame::PopToRegister(Register but_not_to_this_one) {
- UNIMPLEMENTED_MIPS();
- return no_reg;
-}
-
-
-void VirtualFrame::EnsureOneFreeTOSRegister() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitMultiPop(RegList regs) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::SetElementAt(Register reg, int this_far_down) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-Register VirtualFrame::GetTOSRegister() {
- UNIMPLEMENTED_MIPS();
- return no_reg;
-}
-
-
-void VirtualFrame::EmitPush(Operand operand, TypeInfo info) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitPush(MemOperand operand, TypeInfo info) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitPushRoot(Heap::RootListIndex index) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitMultiPush(RegList regs) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::EmitMultiPushReversed(RegList regs) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::SpillAll() {
- UNIMPLEMENTED_MIPS();
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_MIPS
+++ /dev/null
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_MIPS_VIRTUAL_FRAME_MIPS_H_
-#define V8_MIPS_VIRTUAL_FRAME_MIPS_H_
-
-#include "register-allocator.h"
-
-namespace v8 {
-namespace internal {
-
-// This dummy class is only used to create invalid virtual frames.
-extern class InvalidVirtualFrameInitializer {}* kInvalidVirtualFrameInitializer;
-
-
-// -------------------------------------------------------------------------
-// Virtual frames
-//
-// The virtual frame is an abstraction of the physical stack frame. It
-// encapsulates the parameters, frame-allocated locals, and the expression
-// stack. It supports push/pop operations on the expression stack, as well
-// as random access to the expression stack elements, locals, and
-// parameters.
-
-class VirtualFrame : public ZoneObject {
- public:
- class RegisterAllocationScope;
- // A utility class to introduce a scope where the virtual frame is
- // expected to remain spilled. The constructor spills the code
- // generator's current frame, and keeps it spilled.
- class SpilledScope BASE_EMBEDDED {
- public:
- explicit SpilledScope(VirtualFrame* frame)
- : old_is_spilled_(
- Isolate::Current()->is_virtual_frame_in_spilled_scope()) {
- if (frame != NULL) {
- if (!old_is_spilled_) {
- frame->SpillAll();
- } else {
- frame->AssertIsSpilled();
- }
- }
- Isolate::Current()->set_is_virtual_frame_in_spilled_scope(true);
- }
- ~SpilledScope() {
- Isolate::Current()->set_is_virtual_frame_in_spilled_scope(
- old_is_spilled_);
- }
- static bool is_spilled() {
- return Isolate::Current()->is_virtual_frame_in_spilled_scope();
- }
-
- private:
- int old_is_spilled_;
-
- SpilledScope() {}
-
- friend class RegisterAllocationScope;
- };
-
- class RegisterAllocationScope BASE_EMBEDDED {
- public:
- // A utility class to introduce a scope where the virtual frame
- // is not spilled, ie. where register allocation occurs. Eventually
- // when RegisterAllocationScope is ubiquitous it can be removed
- // along with the (by then unused) SpilledScope class.
- inline explicit RegisterAllocationScope(CodeGenerator* cgen);
- inline ~RegisterAllocationScope();
-
- private:
- CodeGenerator* cgen_;
- bool old_is_spilled_;
-
- RegisterAllocationScope() {}
- };
-
- // An illegal index into the virtual frame.
- static const int kIllegalIndex = -1;
-
- // Construct an initial virtual frame on entry to a JS function.
- inline VirtualFrame();
-
- // Construct an invalid virtual frame, used by JumpTargets.
- explicit inline VirtualFrame(InvalidVirtualFrameInitializer* dummy);
-
- // Construct a virtual frame as a clone of an existing one.
- explicit inline VirtualFrame(VirtualFrame* original);
-
- inline CodeGenerator* cgen() const;
- inline MacroAssembler* masm();
-
- // The number of elements on the virtual frame.
- int element_count() const { return element_count_; }
-
- // The height of the virtual expression stack.
- inline int height() const;
-
- bool is_used(int num) {
- switch (num) {
- case 0: { // a0.
- return kA0InUse[top_of_stack_state_];
- }
- case 1: { // a1.
- return kA1InUse[top_of_stack_state_];
- }
- case 2:
- case 3:
- case 4:
- case 5:
- case 6: { // a2 to a3, t0 to t2.
- ASSERT(num - kFirstAllocatedRegister < kNumberOfAllocatedRegisters);
- ASSERT(num >= kFirstAllocatedRegister);
- if ((register_allocation_map_ &
- (1 << (num - kFirstAllocatedRegister))) == 0) {
- return false;
- } else {
- return true;
- }
- }
- default: {
- ASSERT(num < kFirstAllocatedRegister ||
- num >= kFirstAllocatedRegister + kNumberOfAllocatedRegisters);
- return false;
- }
- }
- }
-
- // Add extra in-memory elements to the top of the frame to match an actual
- // frame (eg, the frame after an exception handler is pushed). No code is
- // emitted.
- void Adjust(int count);
-
- // Forget elements from the top of the frame to match an actual frame (eg,
- // the frame after a runtime call). No code is emitted except to bring the
- // frame to a spilled state.
- void Forget(int count);
-
-
- // Spill all values from the frame to memory.
- void SpillAll();
-
- void AssertIsSpilled() const {
- ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
- ASSERT(register_allocation_map_ == 0);
- }
-
- void AssertIsNotSpilled() {
- ASSERT(!SpilledScope::is_spilled());
- }
-
- // Spill all occurrences of a specific register from the frame.
- void Spill(Register reg) {
- UNIMPLEMENTED();
- }
-
- // Spill all occurrences of an arbitrary register if possible. Return the
- // register spilled or no_reg if it was not possible to free any register
- // (ie, they all have frame-external references). Unimplemented.
- Register SpillAnyRegister();
-
- // Make this virtual frame have a state identical to an expected virtual
- // frame. As a side effect, code may be emitted to make this frame match
- // the expected one.
- void MergeTo(const VirtualFrame* expected,
- Condition cond = al,
- Register r1 = no_reg,
- const Operand& r2 = Operand(no_reg));
-
- void MergeTo(VirtualFrame* expected,
- Condition cond = al,
- Register r1 = no_reg,
- const Operand& r2 = Operand(no_reg));
-
- // Checks whether this frame can be branched to by the other frame.
- bool IsCompatibleWith(const VirtualFrame* other) const {
- return (tos_known_smi_map_ & (~other->tos_known_smi_map_)) == 0;
- }
-
- inline void ForgetTypeInfo() {
- tos_known_smi_map_ = 0;
- }
-
- // Detach a frame from its code generator, perhaps temporarily. This
- // tells the register allocator that it is free to use frame-internal
- // registers. Used when the code generator's frame is switched from this
- // one to NULL by an unconditional jump.
- void DetachFromCodeGenerator() {
- }
-
- // (Re)attach a frame to its code generator. This informs the register
- // allocator that the frame-internal register references are active again.
- // Used when a code generator's frame is switched from NULL to this one by
- // binding a label.
- void AttachToCodeGenerator() {
- }
-
- // Emit code for the physical JS entry and exit frame sequences. After
- // calling Enter, the virtual frame is ready for use; and after calling
- // Exit it should not be used. Note that Enter does not allocate space in
- // the physical frame for storing frame-allocated locals.
- void Enter();
- void Exit();
-
- // Prepare for returning from the frame by elements in the virtual frame.
- // This avoids generating unnecessary merge code when jumping to the shared
- // return site. No spill code emitted. Value to return should be in v0.
- inline void PrepareForReturn();
-
- // Number of local variables after when we use a loop for allocating.
- static const int kLocalVarBound = 5;
-
- // Allocate and initialize the frame-allocated locals.
- void AllocateStackSlots();
-
- // The current top of the expression stack as an assembly operand.
- MemOperand Top() {
- AssertIsSpilled();
- return MemOperand(sp, 0);
- }
-
- // An element of the expression stack as an assembly operand.
- MemOperand ElementAt(int index) {
- int adjusted_index = index - kVirtualElements[top_of_stack_state_];
- ASSERT(adjusted_index >= 0);
- return MemOperand(sp, adjusted_index * kPointerSize);
- }
-
- bool KnownSmiAt(int index) {
- if (index >= kTOSKnownSmiMapSize) return false;
- return (tos_known_smi_map_ & (1 << index)) != 0;
- }
- // A frame-allocated local as an assembly operand.
- inline MemOperand LocalAt(int index);
-
- // Push the address of the receiver slot on the frame.
- void PushReceiverSlotAddress();
-
- // The function frame slot.
- MemOperand Function() { return MemOperand(fp, kFunctionOffset); }
-
- // The context frame slot.
- MemOperand Context() { return MemOperand(fp, kContextOffset); }
-
- // A parameter as an assembly operand.
- inline MemOperand ParameterAt(int index);
-
- // The receiver frame slot.
- inline MemOperand Receiver();
-
- // Push a try-catch or try-finally handler on top of the virtual frame.
- void PushTryHandler(HandlerType type);
-
- // Call stub given the number of arguments it expects on (and
- // removes from) the stack.
- inline void CallStub(CodeStub* stub, int arg_count);
-
- // Call JS function from top of the stack with arguments
- // taken from the stack.
- void CallJSFunction(int arg_count);
-
- // Call runtime given the number of arguments expected on (and
- // removed from) the stack.
- void CallRuntime(const Runtime::Function* f, int arg_count);
- void CallRuntime(Runtime::FunctionId id, int arg_count);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- void DebugBreak();
-#endif
-
- // Invoke builtin given the number of arguments it expects on (and
- // removes from) the stack.
- void InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- int arg_count);
-
- // Call load IC. Receiver is on the stack and is consumed. Result is returned
- // in v0.
- void CallLoadIC(Handle<String> name, RelocInfo::Mode mode);
-
- // Call store IC. If the load is contextual, value is found on top of the
- // frame. If not, value and receiver are on the frame. Both are consumed.
- // Result is returned in v0.
- void CallStoreIC(Handle<String> name, bool is_contextual);
-
- // Call keyed load IC. Key and receiver are on the stack. Both are consumed.
- // Result is returned in v0.
- void CallKeyedLoadIC();
-
- // Call keyed store IC. Value, key and receiver are on the stack. All three
- // are consumed. Result is returned in v0 (and a0).
- void CallKeyedStoreIC();
-
- // Call into an IC stub given the number of arguments it removes
- // from the stack. Register arguments to the IC stub are implicit,
- // and depend on the type of IC stub.
- void CallCodeObject(Handle<Code> ic,
- RelocInfo::Mode rmode,
- int dropped_args);
-
- // Drop a number of elements from the top of the expression stack. May
- // emit code to affect the physical frame. Does not clobber any registers
- // except possibly the stack pointer.
- void Drop(int count);
-
- // Drop one element.
- void Drop() { Drop(1); }
-
- // Pop an element from the top of the expression stack. Discards
- // the result.
- void Pop();
-
- // Pop an element from the top of the expression stack. The register
- // returned is one normally used for top-of-stack allocation, so you
- // cannot hold on to it across a subsequent push.
- Register PopToRegister(Register but_not_to_this_one = no_reg);
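- // Typical use (sketch): consume the register returned by PopToRegister()
- // before the next push, or copy it to a scratch register first, since a
- // later push may reuse it.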
-
- // Look at the top of the stack. The register returned is aliased and
- // must be copied to a scratch register before modification.
- Register Peek();
-
- // Look at the value beneath the top of the stack. The register returned is
- // aliased and must be copied to a scratch register before modification.
- Register Peek2();
-
- // Duplicate the top of stack.
- void Dup();
-
- // Duplicate the two elements on top of stack.
- void Dup2();
-
- // Flushes all registers, but it puts a copy of the top-of-stack in a0.
- void SpillAllButCopyTOSToA0();
-
- // Flushes all registers, but it puts a copy of the top-of-stack in a1.
- void SpillAllButCopyTOSToA1();
-
- // Flushes all registers, but it puts a copy of the top-of-stack in a1
- // and the next value on the stack in a0.
- void SpillAllButCopyTOSToA1A0();
-
- // Pop and save an element from the top of the expression stack and
- // emit a corresponding pop instruction.
- void EmitPop(Register reg);
- // Same but for multiple registers.
- void EmitMultiPop(RegList regs);
- void EmitMultiPopReversed(RegList regs);
-
- // Takes the top two elements and puts them in a0 (top element) and a1
- // (second element).
- void PopToA1A0();
-
- // Takes the top element and puts it in a1.
- void PopToA1();
-
- // Takes the top element and puts it in a0.
- void PopToA0();
-
- // Push an element on top of the expression stack and emit a
- // corresponding push instruction.
- void EmitPush(Register reg, TypeInfo type_info = TypeInfo::Unknown());
- void EmitPush(Operand operand, TypeInfo type_info = TypeInfo::Unknown());
- void EmitPush(MemOperand operand, TypeInfo type_info = TypeInfo::Unknown());
- void EmitPushRoot(Heap::RootListIndex index);
-
- // Overwrite the nth thing on the stack. If the nth position is in a
- // register then this turns into a Move, otherwise an sw. Afterwards
- // you can still use the register even if it is a register that can be
- // used for TOS (a0 or a1).
- void SetElementAt(Register reg, int this_far_down);
-
- // Get a register which is free and which must be immediately used to
- // push on the top of the stack.
- Register GetTOSRegister();
-
- // Same but for multiple registers.
- void EmitMultiPush(RegList regs);
- void EmitMultiPushReversed(RegList regs);
-
- static Register scratch0() { return t4; }
- static Register scratch1() { return t5; }
- static Register scratch2() { return t6; }
-
- private:
- static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
- static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
- static const int kContextOffset = StandardFrameConstants::kContextOffset;
-
- static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
- static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots.
-
- // Five states for the top of stack, which can be in memory or in
- // registers a0 and a1.
- enum TopOfStack {
- NO_TOS_REGISTERS, A0_TOS, A1_TOS, A1_A0_TOS, A0_A1_TOS, TOS_STATES
- };
- static const int kMaxTOSRegisters = 2;
-
- static const bool kA0InUse[TOS_STATES];
- static const bool kA1InUse[TOS_STATES];
- static const int kVirtualElements[TOS_STATES];
- static const TopOfStack kStateAfterPop[TOS_STATES];
- static const TopOfStack kStateAfterPush[TOS_STATES];
- static const Register kTopRegister[TOS_STATES];
- static const Register kBottomRegister[TOS_STATES];
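- // These tables, defined in the .cc file, drive the TOS state machine:
- // e.g. kStateAfterPush[NO_TOS_REGISTERS] names the state entered when a
- // value is pushed into a register while none is cached, and
- // kTopRegister[A1_A0_TOS] names the register holding the top element
- // (a1, if the first-named register is the top as in the ARM port).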
-
- // We allocate up to 5 locals in registers.
- static const int kNumberOfAllocatedRegisters = 5;
- // Registers with codes 2 through 6 (v0, v1, a0, a1, a2 in MIPS naming)
- // are allocated to locals.
- static const int kFirstAllocatedRegister = 2;
-
- static const Register kAllocatedRegisters[kNumberOfAllocatedRegisters];
-
- static Register AllocatedRegister(int r) {
- ASSERT(r >= 0 && r < kNumberOfAllocatedRegisters);
- return kAllocatedRegisters[r];
- }
-
- // The number of elements on the stack frame.
- int element_count_;
- TopOfStack top_of_stack_state_:3;
- int register_allocation_map_:kNumberOfAllocatedRegisters;
- static const int kTOSKnownSmiMapSize = 4;
- unsigned tos_known_smi_map_:kTOSKnownSmiMapSize;
-
- // The index of the element that is at the processor's stack pointer
- // (the sp register). For now, since everything is in memory, it is given
- // by the number of elements on the not-very-virtual stack frame.
- int stack_pointer() { return element_count_ - 1; }
-
- // The number of frame-allocated locals and parameters respectively.
- inline int parameter_count() const;
- inline int local_count() const;
-
- // The index of the element that is at the processor's frame pointer
- // (the fp register). The parameters, receiver, function, and context
- // are below the frame pointer.
- inline int frame_pointer() const;
-
- // The index of the first parameter. The receiver lies below the first
- // parameter.
- int param0_index() { return 1; }
-
- // The index of the context slot in the frame. It is immediately
- // below the frame pointer.
- inline int context_index();
-
- // The index of the function slot in the frame. It is below the frame
- // pointer and context slot.
- inline int function_index();
-
- // The index of the first local. Between the frame pointer and the
- // locals lies the return address.
- inline int local0_index() const;
-
- // The index of the base of the expression stack.
- inline int expression_base_index() const;
-
- // Convert a frame index into a frame pointer relative offset into the
- // actual stack.
- inline int fp_relative(int index);
-
- // Spill all elements in registers. Spill the top spilled_args elements
- // on the frame. Sync all other frame elements.
- // Then drop dropped_args elements from the virtual frame, to match
- // the effect of an upcoming call that will drop them from the stack.
- void PrepareForCall(int spilled_args, int dropped_args);
-
- // If all top-of-stack registers are in use then the lowest one is pushed
- // onto the physical stack and made free.
- void EnsureOneFreeTOSRegister();
-
- // Emit instructions to get the top of stack state from where we are to where
- // we want to be.
- void MergeTOSTo(TopOfStack expected_state,
- Condition cond = al,
- Register r1 = no_reg,
- const Operand& r2 = Operand(no_reg));
-
- inline bool Equals(const VirtualFrame* other);
-
- inline void LowerHeight(int count) {
- element_count_ -= count;
- if (count >= kTOSKnownSmiMapSize) {
- tos_known_smi_map_ = 0;
- } else {
- tos_known_smi_map_ >>= count;
- }
- }
-
- inline void RaiseHeight(int count, unsigned known_smi_map = 0) {
- ASSERT(known_smi_map < (1u << count));
- element_count_ += count;
- if (count >= kTOSKnownSmiMapSize) {
- tos_known_smi_map_ = known_smi_map;
- } else {
- tos_known_smi_map_ = ((tos_known_smi_map_ << count) | known_smi_map);
- }
- }
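-
- // Illustrative: pushing one element known to be a smi is RaiseHeight(1, 1);
- // the map shifts left by one and the low bit records the new top. Popping
- // it is LowerHeight(1), which shifts the map right and discards knowledge
- // about the removed slot. Bits shifted out are simply forgotten, which is
- // safe: the map may under-claim smi-ness, never over-claim it.
-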
- friend class JumpTarget;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_MIPS_VIRTUAL_FRAME_MIPS_H_
-
'arch:x64': ['test-assembler-x64.cc',
'test-macro-assembler-x64.cc',
'test-log-stack-tracer.cc'],
- 'arch:mips': ['test-assembler-mips.cc'],
+ 'arch:mips': ['test-assembler-mips.cc',
+ 'test-disasm-mips.cc'],
'os:linux': ['test-platform-linux.cc'],
'os:macos': ['test-platform-macos.cc'],
'os:nullos': ['test-platform-nullos.cc'],
'V8_TARGET_ARCH_IA32',
],
}],
+ ['v8_target_arch=="mips"', {
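+ # Define V8_TARGET_ARCH_MIPS for the MIPS backend, mirroring the
+ # ia32/x64 arch blocks around this one.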
+ 'defines': [
+ 'V8_TARGET_ARCH_MIPS',
+ ],
+ }],
['v8_target_arch=="x64"', {
'defines': [
'V8_TARGET_ARCH_X64',
['v8_target_arch=="mips"', {
'sources': [
'test-assembler-mips.cc',
- 'test-mips.cc',
+ 'test-disasm-mips.cc',
],
}],
[ 'OS=="linux"', {
test-func-name-inference: SKIP
test-heap: SKIP
test-heap-profiler: SKIP
+test-lockers: SKIP
test-log: SKIP
test-log-utils: SKIP
test-mark-compact: SKIP
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
InitializeVM();
v8::HandleScope scope;
- MacroAssembler assm(NULL, 0);
+ MacroAssembler assm(Isolate::Current(), NULL, 0);
// Addition.
__ addu(v0, a0, a1);
InitializeVM();
v8::HandleScope scope;
- MacroAssembler assm(NULL, 0);
+ MacroAssembler assm(Isolate::Current(), NULL, 0);
Label L, C;
__ mov(a1, a0);
InitializeVM();
v8::HandleScope scope;
- MacroAssembler assm(NULL, 0);
+ MacroAssembler assm(Isolate::Current(), NULL, 0);
Label exit, error;
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles t.a ... t.f.
- MacroAssembler assm(NULL, 0);
+ MacroAssembler assm(Isolate::Current(), NULL, 0);
Label L, C;
- if (Isolate::Current()->cpu_features()->IsSupported(FPU)) {
+ if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
__ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
} T;
T t;
- Assembler assm(NULL, 0);
+ Assembler assm(Isolate::Current(), NULL, 0);
Label L, C;
- if (Isolate::Current()->cpu_features()->IsSupported(FPU)) {
+ if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
__ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
} T;
T t;
- Assembler assm(NULL, 0);
+ Assembler assm(Isolate::Current(), NULL, 0);
Label L, C;
- if (Isolate::Current()->cpu_features()->IsSupported(FPU)) {
+ if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
// Load all structure elements to registers.
} T;
T t;
- Assembler assm(NULL, 0);
+ Assembler assm(Isolate::Current(), NULL, 0);
Label L, C;
// Basic word load/store.
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles t.a ... t.f.
- MacroAssembler assm(NULL, 0);
+ MacroAssembler assm(Isolate::Current(), NULL, 0);
Label neither_is_nan, less_than, outa_here;
- if (Isolate::Current()->cpu_features()->IsSupported(FPU)) {
+ if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
__ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
} T;
T t;
- MacroAssembler assm(NULL, 0);
+ MacroAssembler assm(Isolate::Current(), NULL, 0);
// Basic word load.
__ lw(t0, MemOperand(a0, OFFSET_OF(T, input)) );
InitializeVM();
v8::HandleScope scope;
- MacroAssembler assm(NULL, 0);
+ MacroAssembler assm(Isolate::Current(), NULL, 0);
Label exit, exit2, exit3;
__ Branch(&exit, ge, a0, Operand(0x00000000));
} T;
T t;
- Assembler assm(NULL, 0);
+ Assembler assm(Isolate::Current(), NULL, 0);
Label L, C;
- if (Isolate::Current()->cpu_features()->IsSupported(FPU)) {
+ if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
// Load all structure elements to registers.
} T;
T t;
- Assembler assm(NULL, 0);
+ Assembler assm(Isolate::Current(), NULL, 0);
// Test all combinations of LWL and vAddr.
__ lw(t0, MemOperand(a0, OFFSET_OF(T, reg_init)) );
} T;
T t;
- MacroAssembler assm(NULL, 0);
+ MacroAssembler assm(Isolate::Current(), NULL, 0);
__ mov(t6, fp); // Save frame pointer.
__ mov(fp, a0); // Access struct T by fp.
__ addu(t1, t0, t3);
__ subu(t4, t0, t3);
__ nop();
- __ Push(t0); // These instructions disappear after opt.
+ __ push(t0); // These instructions disappear after opt.
__ Pop();
__ addu(t0, t0, t0);
__ nop();
__ Pop(); // These instructions disappear after opt.
- __ Push(t3);
+ __ push(t3);
__ nop();
- __ Push(t3); // These instructions disappear after opt.
- __ Pop(t3);
+ __ push(t3); // These instructions disappear after opt.
+ __ pop(t3);
__ nop();
- __ Push(t3);
- __ Pop(t4);
+ __ push(t3);
+ __ pop(t4);
__ nop();
__ sw(t0, MemOperand(fp, OFFSET_OF(T, y)) );
__ lw(t0, MemOperand(fp, OFFSET_OF(T, y)) );
__ sw(t0, MemOperand(fp, OFFSET_OF(T, y)) );
__ lw(t1, MemOperand(fp, OFFSET_OF(T, y)) );
__ nop();
- __ Push(t1);
+ __ push(t1);
__ lw(t1, MemOperand(fp, OFFSET_OF(T, y)) );
- __ Pop(t1);
+ __ pop(t1);
__ nop();
- __ Push(t1);
+ __ push(t1);
__ lw(t2, MemOperand(fp, OFFSET_OF(T, y)) );
- __ Pop(t1);
+ __ pop(t1);
__ nop();
- __ Push(t1);
+ __ push(t1);
__ lw(t2, MemOperand(fp, OFFSET_OF(T, y)) );
- __ Pop(t2);
+ __ pop(t2);
__ nop();
- __ Push(t2);
+ __ push(t2);
__ lw(t2, MemOperand(fp, OFFSET_OF(T, y)) );
- __ Pop(t1);
+ __ pop(t1);
__ nop();
- __ Push(t1);
+ __ push(t1);
__ lw(t2, MemOperand(fp, OFFSET_OF(T, y)) );
- __ Pop(t3);
+ __ pop(t3);
__ nop();
__ mov(fp, t6);
} T;
T t;
- MacroAssembler assm(NULL, 0);
+ MacroAssembler assm(Isolate::Current(), NULL, 0);
- if (Isolate::Current()->cpu_features()->IsSupported(FPU)) {
+ if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
__ sw(t0, MemOperand(a0, OFFSET_OF(T, cvt_small_in)));
int32_t x##_down_out; \
int32_t neg_##x##_up_out; \
int32_t neg_##x##_down_out; \
- int32_t x##_err1_out; \
- int32_t x##_err2_out; \
- int32_t x##_err3_out; \
- int32_t x##_err4_out; \
+ uint32_t x##_err1_out; \
+ uint32_t x##_err2_out; \
+ uint32_t x##_err3_out; \
+ uint32_t x##_err4_out; \
int32_t x##_invalid_result;
typedef struct {
#undef ROUND_STRUCT_ELEMENT
- MacroAssembler assm(NULL, 0);
+ MacroAssembler assm(Isolate::Current(), NULL, 0);
- if (Isolate::Current()->cpu_features()->IsSupported(FPU)) {
+ if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
// Save FCSR.
Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
USE(dummy);
-#define GET_FPU_ERR(x) ((x >> 2) & (32 - 1))
+#define GET_FPU_ERR(x) (static_cast<int>((x >> kFCSRFlagShift) & kFCSRFlagMask))
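+// GET_FPU_ERR extracts the IEEE exception flag field from a saved FCSR
+// value: the flags occupy FCSR bits 6..2, so kFCSRFlagShift positions the
+// field and kFCSRFlagMask selects the five flags (inexact, underflow,
+// overflow, divide-by-zero, invalid operation). The extracted field is
+// then compared against the kFCSR*FlagBit constants.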
CHECK_EQ(124, t.round_up_out);
CHECK_EQ(123, t.round_down_out);
CHECK_EQ(-124, t.neg_round_up_out);
CHECK_EQ(-123, t.neg_round_down_out);
- // Inaccurate.
- CHECK_EQ(1, GET_FPU_ERR(t.round_err1_out));
+ // Inexact.
+ CHECK_EQ(kFCSRInexactFlagBit, GET_FPU_ERR(t.round_err1_out));
// No error.
CHECK_EQ(0, GET_FPU_ERR(t.round_err2_out));
// Invalid operation.
- CHECK_EQ(16, GET_FPU_ERR(t.round_err3_out));
- CHECK_EQ(16, GET_FPU_ERR(t.round_err4_out));
+ CHECK_EQ(kFCSRInvalidOpFlagBit, GET_FPU_ERR(t.round_err3_out));
+ CHECK_EQ(kFCSRInvalidOpFlagBit, GET_FPU_ERR(t.round_err4_out));
CHECK_EQ(kFPUInvalidResult, t.round_invalid_result);
CHECK_EQ(123, t.floor_up_out);
CHECK_EQ(-124, t.neg_floor_up_out);
CHECK_EQ(-124, t.neg_floor_down_out);
- // Inaccurate.
- CHECK_EQ(1, GET_FPU_ERR(t.floor_err1_out));
+ // Inexact.
+ CHECK_EQ(kFCSRInexactFlagBit, GET_FPU_ERR(t.floor_err1_out));
// No error.
CHECK_EQ(0, GET_FPU_ERR(t.floor_err2_out));
// Invalid operation.
- CHECK_EQ(16, GET_FPU_ERR(t.floor_err3_out));
- CHECK_EQ(16, GET_FPU_ERR(t.floor_err4_out));
+ CHECK_EQ(kFCSRInvalidOpFlagBit, GET_FPU_ERR(t.floor_err3_out));
+ CHECK_EQ(kFCSRInvalidOpFlagBit, GET_FPU_ERR(t.floor_err4_out));
CHECK_EQ(kFPUInvalidResult, t.floor_invalid_result);
CHECK_EQ(124, t.ceil_up_out);
CHECK_EQ(-123, t.neg_ceil_up_out);
CHECK_EQ(-123, t.neg_ceil_down_out);
- // Inaccurate.
- CHECK_EQ(1, GET_FPU_ERR(t.ceil_err1_out));
+ // Inexact.
+ CHECK_EQ(kFCSRInexactFlagBit, GET_FPU_ERR(t.ceil_err1_out));
// No error.
CHECK_EQ(0, GET_FPU_ERR(t.ceil_err2_out));
// Invalid operation.
- CHECK_EQ(16, GET_FPU_ERR(t.ceil_err3_out));
- CHECK_EQ(16, GET_FPU_ERR(t.ceil_err4_out));
+ CHECK_EQ(kFCSRInvalidOpFlagBit, GET_FPU_ERR(t.ceil_err3_out));
+ CHECK_EQ(kFCSRInvalidOpFlagBit, GET_FPU_ERR(t.ceil_err4_out));
CHECK_EQ(kFPUInvalidResult, t.ceil_invalid_result);
// In rounding mode 0 cvt should behave like round.
CHECK_EQ(t.neg_round_up_out, t.neg_cvt_up_out);
CHECK_EQ(t.neg_round_down_out, t.neg_cvt_down_out);
- // Inaccurate.
- CHECK_EQ(1, GET_FPU_ERR(t.cvt_err1_out));
+ // Inexact.
+ CHECK_EQ(kFCSRInexactFlagBit, GET_FPU_ERR(t.cvt_err1_out));
// No error.
CHECK_EQ(0, GET_FPU_ERR(t.cvt_err2_out));
// Invalid operation.
- CHECK_EQ(16, GET_FPU_ERR(t.cvt_err3_out));
- CHECK_EQ(16, GET_FPU_ERR(t.cvt_err4_out));
+ CHECK_EQ(kFCSRInvalidOpFlagBit, GET_FPU_ERR(t.cvt_err3_out));
+ CHECK_EQ(kFCSRInvalidOpFlagBit, GET_FPU_ERR(t.cvt_err4_out));
CHECK_EQ(kFPUInvalidResult, t.cvt_invalid_result);
}
}