immediate_arithmetic_op_32(0x0, dst, src);
}
+ void addl(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op_32(0x0, dst, src);
+ }
+
void addq(Register dst, const Operand& src) {
arithmetic_op(0x03, dst, src);
}
immediate_arithmetic_op(0x0, dst, src);
}
- void addl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x0, dst, src);
- }
-
void cmpb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x7, dst, src);
}
immediate_arithmetic_op_32(0x5, dst, src);
}
+ void subl(Register dst, Immediate src) {
+ immediate_arithmetic_op_32(0x5, dst, src);
+ }
+
void testb(Register reg, Immediate mask);
void testb(const Operand& op, Immediate mask);
void testl(Register reg, Immediate mask);
void DeferredInlineSmiAdd::Generate() {
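+ // No undo is needed here: the inline code restores dst_ to its original
+ // value before jumping to this deferred code.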
- // Undo the optimistic add operation and call the shared stub.
- __ subq(dst_, Immediate(value_));
__ push(dst_);
__ push(Immediate(value_));
GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
void DeferredInlineSmiAddReversed::Generate() {
- // Undo the optimistic add operation and call the shared stub.
- __ subq(dst_, Immediate(value_));
__ push(Immediate(value_));
__ push(dst_);
GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
void DeferredInlineSmiSub::Generate() {
- // Undo the optimistic sub operation and call the shared stub.
- __ addq(dst_, Immediate(value_));
__ push(dst_);
__ push(Immediate(value_));
GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
case Token::ADD: {
operand->ToRegister();
frame_->Spill(operand->reg());
-
- // Optimistically add. Call the specialized add stub if the
- // result is not a smi or overflows.
DeferredCode* deferred = NULL;
if (reversed) {
deferred = new DeferredInlineSmiAddReversed(operand->reg(),
smi_value,
overwrite_mode);
}
- __ movq(kScratchRegister, value, RelocInfo::NONE);
- __ addl(operand->reg(), kScratchRegister);
- deferred->Branch(overflow);
__ testl(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
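+ // Because the tag bits of a smi are zero (kSmiTag == 0) and the integer is
+ // stored shifted left past them, adding two tagged values yields the tagged
+ // sum: (a << kSmiTagSize) + (b << kSmiTagSize) == (a + b) << kSmiTagSize.
+ // A 32-bit overflow therefore means the sum is not a representable smi.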
+ // A smi currently fits in a 32-bit Immediate.
+ __ addl(operand->reg(), Immediate(smi_value));
+ Label add_success;
+ __ j(no_overflow, &add_success);
+ // Overflow: undo the add and restore the 64-bit sign extension before
+ // jumping to the deferred code.
+ __ subl(operand->reg(), Immediate(smi_value));
+ __ movsxlq(operand->reg(), operand->reg());
+ deferred->Jump();
+ __ bind(&add_success);
+ // Sign extend the 32-bit sum into a full 64-bit smi.
+ __ movsxlq(operand->reg(), operand->reg());
deferred->BindExit();
frame_->Push(operand);
break;
__ movq(answer.reg(), left->reg());
switch (op) {
case Token::ADD:
- __ addl(answer.reg(), right->reg()); // Add optimistically.
+ __ addl(answer.reg(), right->reg());
deferred->Branch(overflow);
break;
case Token::SUB:
- __ subl(answer.reg(), right->reg()); // Subtract optimistically.
+ __ subl(answer.reg(), right->reg());
deferred->Branch(overflow);
break;
// callee-saved register.
if (do_gc) {
- __ movq(Operand(rsp, 0), rax); // Result.
+ // Pass failure code returned from last attempt as first argument to GC.
+#ifdef __MSVC__
+ __ movq(rcx, rax); // First argument.
+#else // ! defined(__MSVC__)
+ __ movq(rdi, rax); // First argument.
+#endif
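+ // (Win64 passes the first integer argument in rcx; the System V AMD64 ABI
+ // used on other platforms passes it in rdi.)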
__ movq(kScratchRegister,
FUNCTION_ADDR(Runtime::PerformGC),
RelocInfo::RUNTIME_ENTRY);
// Perform fast-case smi code for the operation (rax <op> rbx) and
// leave result in register rax.
- // Prepare the smi check of both operands by or'ing them together
- // before checking against the smi mask.
+ // Smi check both operands.
__ movq(rcx, rbx);
__ or_(rcx, rax);
+ __ testl(rcx, Immediate(kSmiTagMask));
+ __ j(not_zero, slow);
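+ // Since kSmiTag == 0, the or'ed value has a tag bit set iff at least one
+ // operand is not a smi, so a single test covers both operands.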
switch (op_) {
- case Token::ADD:
- __ addl(rax, rbx); // add optimistically
- __ j(overflow, slow);
+ case Token::ADD: {
+ __ addl(rax, rbx);
+ __ j(overflow, slow); // The slow case rereads operands from the stack.
__ movsxlq(rax, rax); // Sign extend eax into rax.
break;
+ }
- case Token::SUB:
- __ subl(rax, rbx); // subtract optimistically
- __ j(overflow, slow);
+ case Token::SUB: {
+ __ subl(rax, rbx);
+ __ j(overflow, slow); // The slow case rereads operands from the stack.
__ movsxlq(rax, rax); // Sign extend eax into rax.
break;
-
- case Token::DIV:
- case Token::MOD:
- // Sign extend rax into rdx:rax
- // (also sign extends eax into edx if eax is Smi).
- __ cqo();
- // Check for 0 divisor.
- __ testq(rbx, rbx);
- __ j(zero, slow);
- break;
-
- default:
- // Fall-through to smi check.
- break;
- }
-
- // Perform the actual smi check.
- ASSERT(kSmiTag == 0); // adjust zero check if not the case
- __ testl(rcx, Immediate(kSmiTagMask));
- __ j(not_zero, slow);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- // Do nothing here.
- break;
+ }
case Token::MUL:
// If the smi tag is 0 we can just leave the tag on one operand.
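+ // With a zero tag, (a << kSmiTagSize) * b == (a * b) << kSmiTagSize, so
+ // untagging only one operand before the multiply is enough.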
break;
case Token::DIV:
+ // Sign extend rax into rdx:rax
+ // (also sign extends eax into edx if eax is Smi).
+ __ cqo();
+ // Check for 0 divisor.
+ __ testq(rbx, rbx);
+ __ j(zero, slow);
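+ // idiv divides the 128-bit value in rdx:rax and faults on a zero divisor,
+ // hence the sign extension above and the explicit zero check.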
// Divide rdx:rax by rbx (where rdx:rax is equivalent to the smi in eax).
__ idiv(rbx);
// Check that the remainder is zero.
break;
case Token::MOD:
+ // Sign extend rax into rdx:rax
+ // (also sign extends eax into edx if eax is Smi).
+ __ cqo();
+ // Check for 0 divisor.
+ __ testq(rbx, rbx);
+ __ j(zero, slow);
// Divide rdx:rax by rbx.
__ idiv(rbx);
// Check for negative zero result.