IA32OperandConverter i(this, instr);
switch (ArchOpcodeField::decode(instr->opcode())) {
+ case kArchCallCodeObject: {
+ EnsureSpaceForLazyDeopt();
+ if (HasImmediateInput(instr, 0)) {
+ Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+ __ call(code, RelocInfo::CODE_TARGET);
+ } else {
+ Register reg = i.InputRegister(0);
+ __ call(Operand(reg, Code::kHeaderSize - kHeapObjectTag));
+ }
+ AddSafepointAndDeopt(instr);
+ break;
+ }
+ case kArchCallJSFunction: {
+ EnsureSpaceForLazyDeopt();
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ // Check the function's context matches the context argument.
+ __ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
+ __ Assert(equal, kWrongFunctionContext);
+ }
+ __ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
+ AddSafepointAndDeopt(instr);
+ break;
+ }
case kArchJmp:
__ jmp(code()->GetLabel(i.InputBlock(0)));
break;
case kArchRet:
AssembleReturn();
break;
- case kArchDeoptimize: {
- int deoptimization_id = MiscField::decode(instr->opcode());
- BuildTranslation(instr, deoptimization_id);
-
- Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, Deoptimizer::LAZY);
- __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ case kArchTruncateDoubleToI:
+ __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
break;
- }
case kIA32Add:
if (HasImmediateInput(instr, 1)) {
__ add(i.InputOperand(0), i.InputImmediate(1));
__ sar_cl(i.OutputRegister());
}
break;
- case kIA32Push:
- if (HasImmediateInput(instr, 0)) {
- __ push(i.InputImmediate(0));
- } else {
- __ push(i.InputOperand(0));
- }
- break;
- case kIA32CallCodeObject: {
- if (HasImmediateInput(instr, 0)) {
- Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
- __ call(code, RelocInfo::CODE_TARGET);
- } else {
- Register reg = i.InputRegister(0);
- int entry = Code::kHeaderSize - kHeapObjectTag;
- __ call(Operand(reg, entry));
- }
- RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
-
- bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
- if (lazy_deopt) {
- RecordLazyDeoptimizationEntry(instr);
- }
- AddNopForSmiCodeInlining();
- break;
- }
- case kIA32CallAddress:
- if (HasImmediateInput(instr, 0)) {
- // TODO(dcarney): wire up EXTERNAL_REFERENCE instead of RUNTIME_ENTRY.
- __ call(reinterpret_cast<byte*>(i.InputInt32(0)),
- RelocInfo::RUNTIME_ENTRY);
+ case kIA32Ror:
+ if (HasImmediateInput(instr, 1)) {
+ __ ror(i.OutputRegister(), i.InputInt5(1));
} else {
- __ call(i.InputRegister(0));
+ __ ror_cl(i.OutputRegister());
}
break;
- case kPopStack: {
- int words = MiscField::decode(instr->opcode());
- __ add(esp, Immediate(kPointerSize * words));
- break;
- }
- case kIA32CallJSFunction: {
- Register func = i.InputRegister(0);
-
- // TODO(jarin) The load of the context should be separated from the call.
- __ mov(esi, FieldOperand(func, JSFunction::kContextOffset));
- __ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
-
- RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
- RecordLazyDeoptimizationEntry(instr);
- break;
- }
case kSSEFloat64Cmp:
__ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
break;
__ add(esp, Immediate(kDoubleSize));
break;
}
+ case kSSEFloat64Sqrt:
+ __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
+ break;
case kSSEFloat64ToInt32:
__ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
break;
case kSSEFloat64ToUint32: {
XMMRegister scratch = xmm0;
__ Move(scratch, -2147483648.0);
- // TODO(turbofan): IA32 SSE subsd() should take an operand.
- __ addsd(scratch, i.InputDoubleRegister(0));
+ __ addsd(scratch, i.InputOperand(0));
__ cvttsd2si(i.OutputRegister(), scratch);
__ add(i.OutputRegister(), Immediate(0x80000000));
break;
// TODO(turbofan): IA32 SSE LoadUint32() should take an operand.
__ LoadUint32(i.OutputDoubleRegister(), i.InputRegister(0));
break;
- case kSSELoad:
- __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
+ case kIA32Movsxbl:
+ __ movsx_b(i.OutputRegister(), i.MemoryOperand());
break;
- case kSSEStore: {
- int index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ movsd(operand, i.InputDoubleRegister(index));
- break;
- }
- case kIA32LoadWord8:
+ case kIA32Movzxbl:
__ movzx_b(i.OutputRegister(), i.MemoryOperand());
break;
- case kIA32StoreWord8: {
+ case kIA32Movb: {
int index = 0;
Operand operand = i.MemoryOperand(&index);
- __ mov_b(operand, i.InputRegister(index));
+ if (HasImmediateInput(instr, index)) {
+ __ mov_b(operand, i.InputInt8(index));
+ } else {
+ __ mov_b(operand, i.InputRegister(index));
+ }
break;
}
- case kIA32StoreWord8I: {
- int index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ mov_b(operand, i.InputInt8(index));
+ case kIA32Movsxwl:
+ __ movsx_w(i.OutputRegister(), i.MemoryOperand());
break;
- }
- case kIA32LoadWord16:
+ case kIA32Movzxwl:
__ movzx_w(i.OutputRegister(), i.MemoryOperand());
break;
- case kIA32StoreWord16: {
+ case kIA32Movw: {
int index = 0;
Operand operand = i.MemoryOperand(&index);
- __ mov_w(operand, i.InputRegister(index));
+ if (HasImmediateInput(instr, index)) {
+ __ mov_w(operand, i.InputInt16(index));
+ } else {
+ __ mov_w(operand, i.InputRegister(index));
+ }
break;
}
- case kIA32StoreWord16I: {
- int index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ mov_w(operand, i.InputInt16(index));
+ case kIA32Movl:
+ if (instr->HasOutput()) {
+ __ mov(i.OutputRegister(), i.MemoryOperand());
+ } else {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ if (HasImmediateInput(instr, index)) {
+ __ mov(operand, i.InputImmediate(index));
+ } else {
+ __ mov(operand, i.InputRegister(index));
+ }
+ }
break;
- }
- case kIA32LoadWord32:
- __ mov(i.OutputRegister(), i.MemoryOperand());
+ case kIA32Movsd:
+ if (instr->HasOutput()) {
+ __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
+ } else {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ movsd(operand, i.InputDoubleRegister(index));
+ }
break;
- case kIA32StoreWord32: {
- int index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ mov(operand, i.InputRegister(index));
+ case kIA32Movss:
+ if (instr->HasOutput()) {
+ __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
+ __ cvtss2sd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+ } else {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ cvtsd2ss(xmm0, i.InputDoubleRegister(index));
+ __ movss(operand, xmm0);
+ }
break;
- }
- case kIA32StoreWord32I: {
- int index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ mov(operand, i.InputImmediate(index));
+ case kIA32Push:
+ if (HasImmediateInput(instr, 0)) {
+ __ push(i.InputImmediate(0));
+ } else {
+ __ push(i.InputOperand(0));
+ }
break;
- }
case kIA32StoreWriteBarrier: {
Register object = i.InputRegister(0);
Register index = i.InputRegister(1);
}
+// Emits a call to the lazy deoptimization table entry for
+// |deoptimization_id|. The target address comes from
+// Deoptimizer::GetDeoptimizationEntry(..., Deoptimizer::LAZY) and is
+// recorded with a RUNTIME_ENTRY reloc so the deoptimizer can find it.
+void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+ Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+ isolate(), deoptimization_id, Deoptimizer::LAZY);
+ __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+}
+
+
// The calling convention for JSFunctions on IA32 passes arguments on the
// stack and the JSFunction and context in EDI and ESI, respectively, thus
// the steps of the call look as follows:
} else {
__ mov(esp, ebp); // Move stack pointer back to frame pointer.
__ pop(ebp); // Pop caller's frame pointer.
- int pop_count =
- descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0;
+ int pop_count = descriptor->IsJSFunctionCall()
+ ? static_cast<int>(descriptor->JSParameterCount())
+ : 0;
__ ret(pop_count * kPointerSize);
}
}
__ mov(dst, g.ToImmediate(source));
} else {
double v = g.ToDouble(source);
- uint64_t int_val = BitCast<uint64_t, double>(v);
+ uint64_t int_val = bit_cast<uint64_t, double>(v);
int32_t lower = static_cast<int32_t>(int_val);
int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
if (destination->IsDoubleRegister()) {
void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
-#undef __
-
-#ifdef DEBUG
-// Checks whether the code between start_pc and end_pc is a no-op.
-bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
- int end_pc) {
- if (start_pc + 1 != end_pc) {
- return false;
+// Pads the instruction stream with Nops so that the current lazy-deopt
+// site begins at least Deoptimizer::patch_size() bytes after the
+// previous one (padding is skipped for stubs, which are never lazily
+// deoptimized), then marks this pc as the new lazy-deopt site.
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+ int space_needed = Deoptimizer::patch_size();
+ if (!linkage()->info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ __ Nop(padding_size);
+ }
 }
- return *(code->instruction_start() + start_pc) ==
- v8::internal::Assembler::kNopByte;
+ MarkLazyDeoptSite();
}
-#endif // DEBUG
-}
-}
-} // namespace v8::internal::compiler
+#undef __
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8