Register rhs);
+// Check if the operand is a heap number.
+static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
+ Register scratch1, Register scratch2,
+ Label* not_a_heap_number) {
+ __ ldr(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
+ __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
+ __ cmp(scratch1, scratch2);
+ __ b(ne, not_a_heap_number);
+}
+
+
void ToNumberStub::Generate(MacroAssembler* masm) {
  // The ToNumber stub takes one argument in r0.
Label check_heap_number, call_builtin;
__ Ret();
__ bind(&check_heap_number);
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &call_builtin);
+ EmitCheckForHeapNumber(masm, r0, r1, ip, &call_builtin);
__ Ret();
__ bind(&call_builtin);
}
+Handle<Code> GetTypeRecordingUnaryOpStub(int key,
+ TRUnaryOpIC::TypeInfo type_info) {
+ TypeRecordingUnaryOpStub stub(key, type_info);
+ return stub.GetCode();
+}
+
+
+const char* TypeRecordingUnaryOpStub::GetName() {
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "TypeRecordingUnaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ TRUnaryOpIC::GetName(operand_type_));
+ return name_;
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void TypeRecordingUnaryOpStub::Generate(MacroAssembler* masm) {
+ switch (operand_type_) {
+ case TRUnaryOpIC::UNINITIALIZED:
+ GenerateTypeTransition(masm);
+ break;
+ case TRUnaryOpIC::SMI:
+ GenerateSmiStub(masm);
+ break;
+ case TRUnaryOpIC::HEAP_NUMBER:
+ GenerateHeapNumberStub(masm);
+ break;
+ case TRUnaryOpIC::GENERIC:
+ GenerateGenericStub(masm);
+ break;
+ }
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ // Prepare to push argument.
+ __ mov(r3, Operand(r0));
+
+ // Push this stub's key. Although the operation and the type info are
+ // encoded into the key, the encoding is opaque, so push them too.
+ __ mov(r2, Operand(Smi::FromInt(MinorKey())));
+ __ mov(r1, Operand(Smi::FromInt(op_)));
+ __ mov(r0, Operand(Smi::FromInt(operand_type_)));
+
+ __ Push(r3, r2, r1, r0);
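+  // r3 (the operand) is pushed first and thus ends up deepest on the stack,
+  // making it args[0] of the patch function below.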
+
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kTypeRecordingUnaryOp_Patch),
+ masm->isolate()),
+ 4,
+ 1);
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void TypeRecordingUnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+ switch (op_) {
+ case Token::SUB:
+ GenerateSmiStubSub(masm);
+ break;
+ case Token::BIT_NOT:
+ GenerateSmiStubBitNot(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
+ Label non_smi, slow;
+ GenerateSmiCodeSub(masm, &non_smi, &slow);
+ __ bind(&non_smi);
+ __ bind(&slow);
+ GenerateTypeTransition(masm);
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
+ Label non_smi;
+ GenerateSmiCodeBitNot(masm, &non_smi);
+ __ bind(&non_smi);
+ GenerateTypeTransition(masm);
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
+ Label* non_smi,
+ Label* slow) {
+ __ JumpIfNotSmi(r0, non_smi);
+
+ // The result of negating zero or the smallest negative smi is not a smi.
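+  // (With a one-bit smi tag, 0x80000000 is the tagged kSmiMinValue; bic
+  // clears that bit, leaving zero only for 0 and kSmiMinValue, whose
+  // negations -0 and 2^30 are not representable as smis.)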
+ __ bic(ip, r0, Operand(0x80000000), SetCC);
+ __ b(eq, slow);
+
+ // Return '0 - value'.
+ __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
+ __ Ret();
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
+ Label* non_smi) {
+ __ JumpIfNotSmi(r0, non_smi);
+
+ // Flip bits and revert inverted smi-tag.
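+  // A tagged smi is value << 1 with a zero tag bit, so mvn produces
+  // (~value << 1) | 1; clearing the inverted tag bit yields the tagged
+  // result of ~value.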
+ __ mvn(r0, Operand(r0));
+ __ bic(r0, r0, Operand(kSmiTagMask));
+ __ Ret();
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void TypeRecordingUnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+ switch (op_) {
+ case Token::SUB:
+ GenerateHeapNumberStubSub(masm);
+ break;
+ case Token::BIT_NOT:
+ GenerateHeapNumberStubBitNot(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
+ Label non_smi, slow;
+ GenerateSmiCodeSub(masm, &non_smi, &slow);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeSub(masm, &slow);
+ __ bind(&slow);
+ GenerateTypeTransition(masm);
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateHeapNumberStubBitNot(
+ MacroAssembler* masm) {
+ Label non_smi, slow;
+ GenerateSmiCodeBitNot(masm, &non_smi);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeBitNot(masm, &slow);
+ __ bind(&slow);
+ GenerateTypeTransition(masm);
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
+ Label* slow) {
+ EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
+ // r0 is a heap number. Get a new heap number in r1.
+ if (mode_ == UNARY_OVERWRITE) {
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
+ __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ } else {
+ Label slow_allocate_heapnumber, heapnumber_allocated;
+ __ AllocateHeapNumber(r1, r2, r3, r6, &slow_allocate_heapnumber);
+ __ jmp(&heapnumber_allocated);
+
+ __ bind(&slow_allocate_heapnumber);
+ __ EnterInternalFrame();
+ __ push(r0);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(r1, Operand(r0));
+ __ pop(r0);
+ __ LeaveInternalFrame();
+
+ __ bind(&heapnumber_allocated);
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
+ __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
+ __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
+ __ mov(r0, Operand(r1));
+ }
+ __ Ret();
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateHeapNumberCodeBitNot(
+ MacroAssembler* masm, Label* slow) {
+ EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
+  // Convert the heap number in r0 to an untagged integer in r1.
+ __ ConvertToInt32(r0, r1, r2, r3, d0, slow);
+
+ // Do the bitwise operation and check if the result fits in a smi.
+ Label try_float;
+ __ mvn(r1, Operand(r1));
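+  // Adding 0x40000000 sets the sign flag exactly when r1 lies outside the
+  // smi range [-2^30, 2^30), i.e. when the result needs a heap number.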
+ __ add(r2, r1, Operand(0x40000000), SetCC);
+ __ b(mi, &try_float);
+
+ // Tag the result as a smi and we're done.
+ __ mov(r0, Operand(r1, LSL, kSmiTagSize));
+ __ Ret();
+
+ // Try to store the result in a heap number.
+ __ bind(&try_float);
+ if (mode_ == UNARY_NO_OVERWRITE) {
+ Label slow_allocate_heapnumber, heapnumber_allocated;
+ __ AllocateHeapNumber(r0, r2, r3, r6, &slow_allocate_heapnumber);
+ __ jmp(&heapnumber_allocated);
+
+ __ bind(&slow_allocate_heapnumber);
+ __ EnterInternalFrame();
+ __ push(r1);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ pop(r1);
+ __ LeaveInternalFrame();
+
+ __ bind(&heapnumber_allocated);
+ }
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
+ CpuFeatures::Scope scope(VFP3);
+ __ vmov(s0, r1);
+ __ vcvt_f64_s32(d0, s0);
+ __ sub(r2, r0, Operand(kHeapObjectTag));
+ __ vstr(d0, r2, HeapNumber::kValueOffset);
+ __ Ret();
+ } else {
+ // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
+ // have to set up a frame.
+ WriteInt32ToHeapNumberStub stub(r1, r0, r2);
+ __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ }
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void TypeRecordingUnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
+ switch (op_) {
+ case Token::SUB:
+ GenerateGenericStubSub(masm);
+ break;
+ case Token::BIT_NOT:
+ GenerateGenericStubBitNot(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
+ Label non_smi, slow;
+ GenerateSmiCodeSub(masm, &non_smi, &slow);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeSub(masm, &slow);
+ __ bind(&slow);
+ GenerateGenericCodeFallback(masm);
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
+ Label non_smi, slow;
+ GenerateSmiCodeBitNot(masm, &non_smi);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeBitNot(masm, &slow);
+ __ bind(&slow);
+ GenerateGenericCodeFallback(masm);
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateGenericCodeFallback(
+ MacroAssembler* masm) {
+ // Handle the slow case by jumping to the JavaScript builtin.
+ __ push(r0);
+ switch (op_) {
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
+ break;
+ case Token::BIT_NOT:
+ __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
Handle<Code> GetTypeRecordingBinaryOpStub(int key,
TRBinaryOpIC::TypeInfo type_info,
TRBinaryOpIC::TypeInfo result_type_info) {
};
+class TypeRecordingUnaryOpStub: public CodeStub {
+ public:
+ TypeRecordingUnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
+ : op_(op),
+ mode_(mode),
+ operand_type_(TRUnaryOpIC::UNINITIALIZED),
+ name_(NULL) {
+ }
+
+ TypeRecordingUnaryOpStub(
+ int key,
+ TRUnaryOpIC::TypeInfo operand_type)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ operand_type_(operand_type),
+ name_(NULL) {
+ }
+
+ private:
+ Token::Value op_;
+ UnaryOverwriteMode mode_;
+
+ // Operand type information determined at runtime.
+ TRUnaryOpIC::TypeInfo operand_type_;
+
+ char* name_;
+
+ const char* GetName();
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("TypeRecordingUnaryOpStub %d (op %s), "
+ "(mode %d, runtime_type_info %s)\n",
+ MinorKey(),
+ Token::String(op_),
+ static_cast<int>(mode_),
+ TRUnaryOpIC::GetName(operand_type_));
+ }
+#endif
+
+ class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
+ class OpBits: public BitField<Token::Value, 1, 7> {};
+ class OperandTypeInfoBits: public BitField<TRUnaryOpIC::TypeInfo, 8, 3> {};
+
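+  // The minor key packs the overwrite mode (bit 0), the operation
+  // (bits 1-7) and the operand type (bits 8-10); the key-based
+  // constructor above decodes mode and op from it.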
+ Major MajorKey() { return TypeRecordingUnaryOp; }
+ int MinorKey() {
+ return ModeBits::encode(mode_)
+ | OpBits::encode(op_)
+ | OperandTypeInfoBits::encode(operand_type_);
+ }
+
+  // Note: A lot of the helper functions below will vanish when we use
+  // virtual functions instead of switches more often.
+ void Generate(MacroAssembler* masm);
+
+ void GenerateTypeTransition(MacroAssembler* masm);
+
+ void GenerateSmiStub(MacroAssembler* masm);
+ void GenerateSmiStubSub(MacroAssembler* masm);
+ void GenerateSmiStubBitNot(MacroAssembler* masm);
+ void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow);
+ void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow);
+
+ void GenerateHeapNumberStub(MacroAssembler* masm);
+ void GenerateHeapNumberStubSub(MacroAssembler* masm);
+ void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
+ void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
+ void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
+
+ void GenerateGenericStub(MacroAssembler* masm);
+ void GenerateGenericStubSub(MacroAssembler* masm);
+ void GenerateGenericStubBitNot(MacroAssembler* masm);
+ void GenerateGenericCodeFallback(MacroAssembler* masm);
+
+ virtual int GetCodeKind() { return Code::TYPE_RECORDING_UNARY_OP_IC; }
+
+ virtual InlineCacheState GetICState() {
+ return TRUnaryOpIC::ToState(operand_type_);
+ }
+
+ virtual void FinishCode(Code* code) {
+ code->set_type_recording_unary_op_type(operand_type_);
+ }
+};
+
+
class TypeRecordingBinaryOpStub: public CodeStub {
public:
TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
break;
}
- case Token::SUB: {
- Comment cmt(masm_, "[ UnaryOperation (SUB)");
- bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS);
- // GenericUnaryOpStub expects the argument to be in the
- // accumulator register r0.
- VisitForAccumulatorValue(expr->expression());
- __ CallStub(&stub);
- context()->Plug(r0);
+ case Token::SUB:
+ EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
break;
- }
- case Token::BIT_NOT: {
- Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
- // The generic unary operation stub expects the argument to be
- // in the accumulator register r0.
- VisitForAccumulatorValue(expr->expression());
- Label done;
- bool inline_smi_code = ShouldInlineSmiCase(expr->op());
- if (inline_smi_code) {
- Label call_stub;
- __ JumpIfNotSmi(r0, &call_stub);
- __ mvn(r0, Operand(r0));
- // Bit-clear inverted smi-tag.
- __ bic(r0, r0, Operand(kSmiTagMask));
- __ b(&done);
- __ bind(&call_stub);
- }
- bool overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOpFlags flags = inline_smi_code
- ? NO_UNARY_SMI_CODE_IN_STUB
- : NO_UNARY_FLAGS;
- UnaryOverwriteMode mode =
- overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags);
- __ CallStub(&stub);
- __ bind(&done);
- context()->Plug(r0);
+ case Token::BIT_NOT:
+ EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
break;
- }
default:
UNREACHABLE();
}
+void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
+ const char* comment) {
+ // TODO(svenpanne): Allowing format strings in Comment would be nice here...
+ Comment cmt(masm_, comment);
+ bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
+ UnaryOverwriteMode overwrite =
+ can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
+ TypeRecordingUnaryOpStub stub(expr->op(), overwrite);
+  // TypeRecordingUnaryOpStub expects the argument to be in the
+  // accumulator register r0.
+ VisitForAccumulatorValue(expr->expression());
+ SetSourcePosition(expr->position());
+ EmitCallIC(stub.GetCode(), NULL, expr->id());
+ context()->Plug(r0);
+}
+
+
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
class UnaryOperation: public Expression {
public:
- UnaryOperation(Token::Value op, Expression* expression)
- : op_(op), expression_(expression) {
+ UnaryOperation(Token::Value op, Expression* expression, int pos)
+ : op_(op), expression_(expression), pos_(pos) {
ASSERT(Token::IsUnaryOp(op));
}
Token::Value op() const { return op_; }
Expression* expression() const { return expression_; }
+ virtual int position() const { return pos_; }
private:
Token::Value op_;
Expression* expression_;
+ int pos_;
};
// as only the stubs up to and including Instanceof allows nested stub calls.
#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
V(CallFunction) \
+ V(TypeRecordingUnaryOp) \
V(TypeRecordingBinaryOp) \
V(StringAdd) \
V(SubString) \
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
+
+ void EmitUnaryOperation(UnaryOperation* expr, const char* comment);
+
// Handles the shortcutted logical binary operations in VisitBinaryOperation.
void EmitLogicalOperation(BinaryOperation* expr);
};
+// Get the integer part of a heap number. Surprisingly, all this bit twiddling
+// is faster than using the built-in instructions on floating point registers.
+// Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the
+// trashed registers.
+static void IntegerConvert(MacroAssembler* masm,
+ Register source,
+ TypeInfo type_info,
+ bool use_sse3,
+ Label* conversion_failure) {
+ ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
+ Label done, right_exponent, normal_exponent;
+ Register scratch = ebx;
+ Register scratch2 = edi;
+ if (type_info.IsInteger32() && CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset));
+ return;
+ }
+ if (!type_info.IsInteger32() || !use_sse3) {
+ // Get exponent word.
+ __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
+ // Get exponent alone in scratch2.
+ __ mov(scratch2, scratch);
+ __ and_(scratch2, HeapNumber::kExponentMask);
+ }
+ if (use_sse3) {
+ CpuFeatures::Scope scope(SSE3);
+ if (!type_info.IsInteger32()) {
+ // Check whether the exponent is too big for a 64 bit signed integer.
+ static const uint32_t kTooBigExponent =
+ (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
+ __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
+ __ j(greater_equal, conversion_failure);
+ }
+ // Load x87 register with heap number.
+ __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
+ // Reserve space for 64 bit answer.
+ __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
+ // Do conversion, which cannot fail because we checked the exponent.
+ __ fisttp_d(Operand(esp, 0));
+ __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
+ __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
+ } else {
+ // Load ecx with zero. We use this either for the final shift or
+ // for the answer.
+ __ xor_(ecx, Operand(ecx));
+ // Check whether the exponent matches a 32 bit signed int that cannot be
+ // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
+ // exponent is 30 (biased). This is the exponent that we are fastest at and
+ // also the highest exponent we can handle here.
+ const uint32_t non_smi_exponent =
+ (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+ __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
+ // If we have a match of the int32-but-not-Smi exponent then skip some
+ // logic.
+ __ j(equal, &right_exponent);
+ // If the exponent is higher than that then go to slow case. This catches
+ // numbers that don't fit in a signed int32, infinities and NaNs.
+ __ j(less, &normal_exponent);
+
+ {
+ // Handle a big exponent. The only reason we have this code is that the
+ // >>> operator has a tendency to generate numbers with an exponent of 31.
+ const uint32_t big_non_smi_exponent =
+ (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
+ __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
+ __ j(not_equal, conversion_failure);
+ // We have the big exponent, typically from >>>. This means the number is
+ // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
+ __ mov(scratch2, scratch);
+ __ and_(scratch2, HeapNumber::kMantissaMask);
+ // Put back the implicit 1.
+ __ or_(scratch2, 1 << HeapNumber::kExponentShift);
+ // Shift up the mantissa bits to take up the space the exponent used to
+ // take. We just orred in the implicit bit so that took care of one and
+ // we want to use the full unsigned range so we subtract 1 bit from the
+ // shift distance.
+ const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
+ __ shl(scratch2, big_shift_distance);
+ // Get the second half of the double.
+ __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
+    // Shift down 21 bits to get the most significant 11 bits of the low
+    // mantissa word.
+ __ shr(ecx, 32 - big_shift_distance);
+ __ or_(ecx, Operand(scratch2));
+ // We have the answer in ecx, but we may need to negate it.
+ __ test(scratch, Operand(scratch));
+ __ j(positive, &done);
+ __ neg(ecx);
+ __ jmp(&done);
+ }
+
+ __ bind(&normal_exponent);
+ // Exponent word in scratch, exponent part of exponent word in scratch2.
+ // Zero in ecx.
+ // We know the exponent is smaller than 30 (biased). If it is less than
+    // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
+ // it rounds to zero.
+ const uint32_t zero_exponent =
+ (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
+ __ sub(Operand(scratch2), Immediate(zero_exponent));
+ // ecx already has a Smi zero.
+ __ j(less, &done);
+
+ // We have a shifted exponent between 0 and 30 in scratch2.
+ __ shr(scratch2, HeapNumber::kExponentShift);
+ __ mov(ecx, Immediate(30));
+ __ sub(ecx, Operand(scratch2));
+
+ __ bind(&right_exponent);
+ // Here ecx is the shift, scratch is the exponent word.
+ // Get the top bits of the mantissa.
+ __ and_(scratch, HeapNumber::kMantissaMask);
+ // Put back the implicit 1.
+ __ or_(scratch, 1 << HeapNumber::kExponentShift);
+ // Shift up the mantissa bits to take up the space the exponent used to
+    // take. We have kExponentShift + 1 significant bits in the low end of the
+ // word. Shift them to the top bits.
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+ __ shl(scratch, shift_distance);
+ // Get the second half of the double. For some exponents we don't
+ // actually need this because the bits get shifted out again, but
+ // it's probably slower to test than just to do it.
+ __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
+    // Shift down 22 bits to get the most significant 10 bits of the low
+    // mantissa word.
+ __ shr(scratch2, 32 - shift_distance);
+ __ or_(scratch2, Operand(scratch));
+ // Move down according to the exponent.
+ __ shr_cl(scratch2);
+ // Now the unsigned answer is in scratch2. We need to move it to ecx and
+ // we may need to fix the sign.
+ NearLabel negative;
+ __ xor_(ecx, Operand(ecx));
+ __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
+ __ j(greater, &negative);
+ __ mov(ecx, scratch2);
+ __ jmp(&done);
+ __ bind(&negative);
+ __ sub(ecx, Operand(scratch2));
+ __ bind(&done);
+ }
+}
+
+
+Handle<Code> GetTypeRecordingUnaryOpStub(int key,
+ TRUnaryOpIC::TypeInfo type_info) {
+ TypeRecordingUnaryOpStub stub(key, type_info);
+ return stub.GetCode();
+}
+
+
+const char* TypeRecordingUnaryOpStub::GetName() {
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "TypeRecordingUnaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ TRUnaryOpIC::GetName(operand_type_));
+ return name_;
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void TypeRecordingUnaryOpStub::Generate(MacroAssembler* masm) {
+ switch (operand_type_) {
+ case TRUnaryOpIC::UNINITIALIZED:
+ GenerateTypeTransition(masm);
+ break;
+ case TRUnaryOpIC::SMI:
+ GenerateSmiStub(masm);
+ break;
+ case TRUnaryOpIC::HEAP_NUMBER:
+ GenerateHeapNumberStub(masm);
+ break;
+ case TRUnaryOpIC::GENERIC:
+ GenerateGenericStub(masm);
+ break;
+ }
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ __ pop(ecx); // Save return address.
+ __ push(eax);
+  // The argument is now on top of the stack.
+ // Push this stub's key. Although the operation and the type info are
+ // encoded into the key, the encoding is opaque, so push them too.
+ __ push(Immediate(Smi::FromInt(MinorKey())));
+ __ push(Immediate(Smi::FromInt(op_)));
+ __ push(Immediate(Smi::FromInt(operand_type_)));
+
+ __ push(ecx); // Push return address.
+
+ // Patch the caller to an appropriate specialized stub and return the
+ // operation result to the caller of the stub.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kTypeRecordingUnaryOp_Patch),
+ masm->isolate()),
+ 4,
+ 1);
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void TypeRecordingUnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+ switch (op_) {
+ case Token::SUB:
+ GenerateSmiStubSub(masm);
+ break;
+ case Token::BIT_NOT:
+ GenerateSmiStubBitNot(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
+ NearLabel non_smi;
+ Label undo, slow;
+ GenerateSmiCodeSub(masm, &non_smi, &undo, &slow);
+ __ bind(&undo);
+ GenerateSmiCodeUndo(masm);
+ __ bind(&non_smi);
+ __ bind(&slow);
+ GenerateTypeTransition(masm);
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
+ NearLabel non_smi;
+ GenerateSmiCodeBitNot(masm, &non_smi);
+ __ bind(&non_smi);
+ GenerateTypeTransition(masm);
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
+ NearLabel* non_smi,
+ Label* undo,
+ Label* slow) {
+ // Check whether the value is a smi.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, non_smi);
+
+ // We can't handle -0 with smis, so use a type transition for that case.
+ __ test(eax, Operand(eax));
+ __ j(zero, slow);
+
+ // Try optimistic subtraction '0 - value', saving operand in eax for undo.
+ __ mov(edx, Operand(eax));
+ __ Set(eax, Immediate(0));
+ __ sub(eax, Operand(edx));
+ __ j(overflow, undo);
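+  // Overflow occurs only when negating the minimal smi, whose negation
+  // (2^30) is not representable as a smi; 'undo' restores the operand.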
+ __ ret(0);
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
+ NearLabel* non_smi) {
+ // Check whether the value is a smi.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, non_smi);
+
+ // Flip bits and revert inverted smi-tag.
+ __ not_(eax);
+ __ and_(eax, ~kSmiTagMask);
+ __ ret(0);
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) {
+ __ mov(eax, Operand(edx));
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void TypeRecordingUnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+ switch (op_) {
+ case Token::SUB:
+ GenerateHeapNumberStubSub(masm);
+ break;
+ case Token::BIT_NOT:
+ GenerateHeapNumberStubBitNot(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
+ NearLabel non_smi;
+ Label undo, slow;
+ GenerateSmiCodeSub(masm, &non_smi, &undo, &slow);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeSub(masm, &slow);
+ __ bind(&undo);
+ GenerateSmiCodeUndo(masm);
+ __ bind(&slow);
+ GenerateTypeTransition(masm);
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateHeapNumberStubBitNot(
+ MacroAssembler* masm) {
+ NearLabel non_smi;
+ Label slow;
+ GenerateSmiCodeBitNot(masm, &non_smi);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeBitNot(masm, &slow);
+ __ bind(&slow);
+ GenerateTypeTransition(masm);
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
+ Label* slow) {
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(edx, masm->isolate()->factory()->heap_number_map());
+ __ j(not_equal, slow);
+
+ if (mode_ == UNARY_OVERWRITE) {
+ __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
+ __ xor_(edx, HeapNumber::kSignMask); // Flip sign.
+ __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
+ } else {
+ __ mov(edx, Operand(eax));
+ // edx: operand
+
+ Label slow_allocate_heapnumber, heapnumber_allocated;
+ __ AllocateHeapNumber(eax, ebx, ecx, &slow_allocate_heapnumber);
+ __ jmp(&heapnumber_allocated);
+
+ __ bind(&slow_allocate_heapnumber);
+ __ EnterInternalFrame();
+ __ push(edx);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ pop(edx);
+ __ LeaveInternalFrame();
+
+ __ bind(&heapnumber_allocated);
+ // eax: allocated 'empty' number
+ __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
+ __ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
+ __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
+ __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
+ __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
+ }
+ __ ret(0);
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateHeapNumberCodeBitNot(
+ MacroAssembler* masm,
+ Label* slow) {
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(edx, masm->isolate()->factory()->heap_number_map());
+ __ j(not_equal, slow);
+
+ // Convert the heap number in eax to an untagged integer in ecx.
+ IntegerConvert(masm, eax, TypeInfo::Unknown(), CpuFeatures::IsSupported(SSE3),
+ slow);
+
+ // Do the bitwise operation and check if the result fits in a smi.
+ NearLabel try_float;
+ __ not_(ecx);
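+  // ecx - 0xc0000000 equals ecx + 0x40000000 mod 2^32, so the sign flag is
+  // set exactly when ecx is outside the smi range [-2^30, 2^30).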
+ __ cmp(ecx, 0xc0000000);
+ __ j(sign, &try_float);
+
+ // Tag the result as a smi and we're done.
+ STATIC_ASSERT(kSmiTagSize == 1);
+ __ lea(eax, Operand(ecx, times_2, kSmiTag));
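+  // With kSmiTag == 0 and a one-bit tag, the lea doubles and tags the
+  // value in a single instruction.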
+ __ ret(0);
+
+ // Try to store the result in a heap number.
+ __ bind(&try_float);
+ if (mode_ == UNARY_NO_OVERWRITE) {
+ Label slow_allocate_heapnumber, heapnumber_allocated;
+ __ AllocateHeapNumber(eax, edx, edi, &slow_allocate_heapnumber);
+ __ jmp(&heapnumber_allocated);
+
+ __ bind(&slow_allocate_heapnumber);
+ __ EnterInternalFrame();
+ __ push(ecx);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ pop(ecx);
+ __ LeaveInternalFrame();
+
+ __ bind(&heapnumber_allocated);
+ }
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ cvtsi2sd(xmm0, Operand(ecx));
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ push(ecx);
+ __ fild_s(Operand(esp, 0));
+ __ pop(ecx);
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ }
+ __ ret(0);
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void TypeRecordingUnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
+ switch (op_) {
+ case Token::SUB:
+ GenerateGenericStubSub(masm);
+ break;
+ case Token::BIT_NOT:
+ GenerateGenericStubBitNot(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
+ NearLabel non_smi;
+ Label undo, slow;
+ GenerateSmiCodeSub(masm, &non_smi, &undo, &slow);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeSub(masm, &slow);
+ __ bind(&undo);
+ GenerateSmiCodeUndo(masm);
+ __ bind(&slow);
+ GenerateGenericCodeFallback(masm);
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
+ NearLabel non_smi;
+ Label slow;
+ GenerateSmiCodeBitNot(masm, &non_smi);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeBitNot(masm, &slow);
+ __ bind(&slow);
+ GenerateGenericCodeFallback(masm);
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateGenericCodeFallback(
+ MacroAssembler* masm) {
+ // Handle the slow case by jumping to the corresponding JavaScript builtin.
+  __ pop(ecx);  // Pop return address.
+  __ push(eax);
+  __ push(ecx);  // Push return address.
+ switch (op_) {
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+ break;
+ case Token::BIT_NOT:
+ __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
Handle<Code> GetTypeRecordingBinaryOpStub(int key,
TRBinaryOpIC::TypeInfo type_info,
TRBinaryOpIC::TypeInfo result_type_info) {
}
-// Get the integer part of a heap number. Surprisingly, all this bit twiddling
-// is faster than using the built-in instructions on floating point registers.
-// Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the
-// trashed registers.
-void IntegerConvert(MacroAssembler* masm,
- Register source,
- TypeInfo type_info,
- bool use_sse3,
- Label* conversion_failure) {
- ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
- Label done, right_exponent, normal_exponent;
- Register scratch = ebx;
- Register scratch2 = edi;
- if (type_info.IsInteger32() && CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope scope(SSE2);
- __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset));
- return;
- }
- if (!type_info.IsInteger32() || !use_sse3) {
- // Get exponent word.
- __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- __ mov(scratch2, scratch);
- __ and_(scratch2, HeapNumber::kExponentMask);
- }
- if (use_sse3) {
- CpuFeatures::Scope scope(SSE3);
- if (!type_info.IsInteger32()) {
- // Check whether the exponent is too big for a 64 bit signed integer.
- static const uint32_t kTooBigExponent =
- (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
- __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
- __ j(greater_equal, conversion_failure);
- }
- // Load x87 register with heap number.
- __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
- // Reserve space for 64 bit answer.
- __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
- // Do conversion, which cannot fail because we checked the exponent.
- __ fisttp_d(Operand(esp, 0));
- __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
- __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
- } else {
- // Load ecx with zero. We use this either for the final shift or
- // for the answer.
- __ xor_(ecx, Operand(ecx));
- // Check whether the exponent matches a 32 bit signed int that cannot be
- // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
- // exponent is 30 (biased). This is the exponent that we are fastest at and
- // also the highest exponent we can handle here.
- const uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
- // If we have a match of the int32-but-not-Smi exponent then skip some
- // logic.
- __ j(equal, &right_exponent);
- // If the exponent is higher than that then go to slow case. This catches
- // numbers that don't fit in a signed int32, infinities and NaNs.
- __ j(less, &normal_exponent);
-
- {
- // Handle a big exponent. The only reason we have this code is that the
- // >>> operator has a tendency to generate numbers with an exponent of 31.
- const uint32_t big_non_smi_exponent =
- (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
- __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
- __ j(not_equal, conversion_failure);
- // We have the big exponent, typically from >>>. This means the number is
- // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
- __ mov(scratch2, scratch);
- __ and_(scratch2, HeapNumber::kMantissaMask);
- // Put back the implicit 1.
- __ or_(scratch2, 1 << HeapNumber::kExponentShift);
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We just orred in the implicit bit so that took care of one and
- // we want to use the full unsigned range so we subtract 1 bit from the
- // shift distance.
- const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
- __ shl(scratch2, big_shift_distance);
- // Get the second half of the double.
- __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 21 bits to get the most significant 11 bits or the low
- // mantissa word.
- __ shr(ecx, 32 - big_shift_distance);
- __ or_(ecx, Operand(scratch2));
- // We have the answer in ecx, but we may need to negate it.
- __ test(scratch, Operand(scratch));
- __ j(positive, &done);
- __ neg(ecx);
- __ jmp(&done);
- }
-
- __ bind(&normal_exponent);
- // Exponent word in scratch, exponent part of exponent word in scratch2.
- // Zero in ecx.
- // We know the exponent is smaller than 30 (biased). If it is less than
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
- // it rounds to zero.
- const uint32_t zero_exponent =
- (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
- __ sub(Operand(scratch2), Immediate(zero_exponent));
- // ecx already has a Smi zero.
- __ j(less, &done);
-
- // We have a shifted exponent between 0 and 30 in scratch2.
- __ shr(scratch2, HeapNumber::kExponentShift);
- __ mov(ecx, Immediate(30));
- __ sub(ecx, Operand(scratch2));
-
- __ bind(&right_exponent);
- // Here ecx is the shift, scratch is the exponent word.
- // Get the top bits of the mantissa.
- __ and_(scratch, HeapNumber::kMantissaMask);
- // Put back the implicit 1.
- __ or_(scratch, 1 << HeapNumber::kExponentShift);
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We have kExponentShift + 1 significant bits int he low end of the
- // word. Shift them to the top bits.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ shl(scratch, shift_distance);
- // Get the second half of the double. For some exponents we don't
- // actually need this because the bits get shifted out again, but
- // it's probably slower to test than just to do it.
- __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 22 bits to get the most significant 10 bits or the low
- // mantissa word.
- __ shr(scratch2, 32 - shift_distance);
- __ or_(scratch2, Operand(scratch));
- // Move down according to the exponent.
- __ shr_cl(scratch2);
- // Now the unsigned answer is in scratch2. We need to move it to ecx and
- // we may need to fix the sign.
- NearLabel negative;
- __ xor_(ecx, Operand(ecx));
- __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
- __ j(greater, &negative);
- __ mov(ecx, scratch2);
- __ jmp(&done);
- __ bind(&negative);
- __ sub(ecx, Operand(scratch2));
- __ bind(&done);
- }
-}
-
-
// Input: edx, eax are the left and right objects of a bit op.
// Output: eax, ecx are left and right integers for a bit op.
void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm,
};
+class TypeRecordingUnaryOpStub: public CodeStub {
+ public:
+ TypeRecordingUnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
+ : op_(op),
+ mode_(mode),
+ operand_type_(TRUnaryOpIC::UNINITIALIZED),
+ name_(NULL) {
+ }
+
+ TypeRecordingUnaryOpStub(
+ int key,
+ TRUnaryOpIC::TypeInfo operand_type)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ operand_type_(operand_type),
+ name_(NULL) {
+ }
+
+ private:
+ Token::Value op_;
+ UnaryOverwriteMode mode_;
+
+ // Operand type information determined at runtime.
+ TRUnaryOpIC::TypeInfo operand_type_;
+
+ char* name_;
+
+ const char* GetName();
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("TypeRecordingUnaryOpStub %d (op %s), "
+ "(mode %d, runtime_type_info %s)\n",
+ MinorKey(),
+ Token::String(op_),
+ static_cast<int>(mode_),
+ TRUnaryOpIC::GetName(operand_type_));
+ }
+#endif
+
+ class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
+ class OpBits: public BitField<Token::Value, 1, 7> {};
+ class OperandTypeInfoBits: public BitField<TRUnaryOpIC::TypeInfo, 8, 3> {};
+
+ Major MajorKey() { return TypeRecordingUnaryOp; }
+ int MinorKey() {
+ return ModeBits::encode(mode_)
+ | OpBits::encode(op_)
+ | OperandTypeInfoBits::encode(operand_type_);
+ }
+
+  // Note: A lot of the helper functions below will vanish when we use
+  // virtual functions instead of switches more often.
+ void Generate(MacroAssembler* masm);
+
+ void GenerateTypeTransition(MacroAssembler* masm);
+
+ void GenerateSmiStub(MacroAssembler* masm);
+ void GenerateSmiStubSub(MacroAssembler* masm);
+ void GenerateSmiStubBitNot(MacroAssembler* masm);
+ void GenerateSmiCodeSub(MacroAssembler* masm, NearLabel* non_smi, Label* undo,
+ Label* slow);
+ void GenerateSmiCodeBitNot(MacroAssembler* masm, NearLabel* non_smi);
+ void GenerateSmiCodeUndo(MacroAssembler* masm);
+
+ void GenerateHeapNumberStub(MacroAssembler* masm);
+ void GenerateHeapNumberStubSub(MacroAssembler* masm);
+ void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
+ void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
+ void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
+
+ void GenerateGenericStub(MacroAssembler* masm);
+ void GenerateGenericStubSub(MacroAssembler* masm);
+ void GenerateGenericStubBitNot(MacroAssembler* masm);
+ void GenerateGenericCodeFallback(MacroAssembler* masm);
+
+ virtual int GetCodeKind() { return Code::TYPE_RECORDING_UNARY_OP_IC; }
+
+ virtual InlineCacheState GetICState() {
+ return TRUnaryOpIC::ToState(operand_type_);
+ }
+
+ virtual void FinishCode(Code* code) {
+ code->set_type_recording_unary_op_type(operand_type_);
+ }
+};
+
+
class TypeRecordingBinaryOpStub: public CodeStub {
public:
TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
break;
}
- case Token::SUB: {
- Comment cmt(masm_, "[ UnaryOperation (SUB)");
- bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS);
- // GenericUnaryOpStub expects the argument to be in the
- // accumulator register eax.
- VisitForAccumulatorValue(expr->expression());
- __ CallStub(&stub);
- context()->Plug(eax);
+ case Token::SUB:
+ EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
break;
- }
- case Token::BIT_NOT: {
- Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
- // The generic unary operation stub expects the argument to be
- // in the accumulator register eax.
- VisitForAccumulatorValue(expr->expression());
- Label done;
- bool inline_smi_case = ShouldInlineSmiCase(expr->op());
- if (inline_smi_case) {
- NearLabel call_stub;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &call_stub);
- __ lea(eax, Operand(eax, kSmiTagMask));
- __ not_(eax);
- __ jmp(&done);
- __ bind(&call_stub);
- }
- bool overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode mode =
- overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- UnaryOpFlags flags = inline_smi_case
- ? NO_UNARY_SMI_CODE_IN_STUB
- : NO_UNARY_FLAGS;
- GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags);
- __ CallStub(&stub);
- __ bind(&done);
- context()->Plug(eax);
+ case Token::BIT_NOT:
+ EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
break;
- }
default:
UNREACHABLE();
}
+void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
+ const char* comment) {
+ // TODO(svenpanne): Allowing format strings in Comment would be nice here...
+ Comment cmt(masm_, comment);
+ bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
+ UnaryOverwriteMode overwrite =
+ can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
+ TypeRecordingUnaryOpStub stub(expr->op(), overwrite);
+ // TypeRecordingUnaryOpStub expects the argument to be in the
+ // accumulator register eax.
+ VisitForAccumulatorValue(expr->expression());
+ SetSourcePosition(expr->position());
+ EmitCallIC(stub.GetCode(), NULL, expr->id());
+ context()->Plug(eax);
+}
+
+
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
return KeyedStoreIC::Clear(address, target);
case Code::CALL_IC: return CallIC::Clear(address, target);
case Code::KEYED_CALL_IC: return KeyedCallIC::Clear(address, target);
+ case Code::TYPE_RECORDING_UNARY_OP_IC:
case Code::TYPE_RECORDING_BINARY_OP_IC:
case Code::COMPARE_IC:
// Clearing these is tricky and does not
}
+void TRUnaryOpIC::patch(Code* code) {
+ set_target(code);
+}
+
+
+const char* TRUnaryOpIC::GetName(TypeInfo type_info) {
+ switch (type_info) {
+ case UNINITIALIZED: return "Uninitialized";
+ case SMI: return "Smi";
+ case HEAP_NUMBER: return "HeapNumbers";
+ case GENERIC: return "Generic";
+ default: return "Invalid";
+ }
+}
+
+
+TRUnaryOpIC::State TRUnaryOpIC::ToState(TypeInfo type_info) {
+ switch (type_info) {
+ case UNINITIALIZED:
+ return ::v8::internal::UNINITIALIZED;
+ case SMI:
+ case HEAP_NUMBER:
+ return MONOMORPHIC;
+ case GENERIC:
+ return MEGAMORPHIC;
+ }
+ UNREACHABLE();
+ return ::v8::internal::UNINITIALIZED;
+}
+
+
+TRUnaryOpIC::TypeInfo TRUnaryOpIC::GetTypeInfo(Handle<Object> operand) {
+ ::v8::internal::TypeInfo operand_type =
+ ::v8::internal::TypeInfo::TypeFromValue(operand);
+ if (operand_type.IsSmi()) {
+ return SMI;
+ } else if (operand_type.IsNumber()) {
+ return HEAP_NUMBER;
+ } else {
+ return GENERIC;
+ }
+}
+
+
+TRUnaryOpIC::TypeInfo TRUnaryOpIC::JoinTypes(TRUnaryOpIC::TypeInfo x,
+ TRUnaryOpIC::TypeInfo y) {
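+  // The TypeInfo enum is ordered from most specific to most generic, so
+  // joining two recorded types is simply taking the maximum.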
+ return x >= y ? x : y;
+}
+
+
void TRBinaryOpIC::patch(Code* code) {
set_target(code);
}
}
+// defined in code-stubs-<arch>.cc
+// Only needed to remove dependency of ic.cc on code-stubs-<arch>.h.
+Handle<Code> GetTypeRecordingUnaryOpStub(int key,
+ TRUnaryOpIC::TypeInfo type_info);
+
+
+RUNTIME_FUNCTION(MaybeObject*, TypeRecordingUnaryOp_Patch) {
+ ASSERT(args.length() == 4);
+
+ HandleScope scope(isolate);
+ Handle<Object> operand = args.at<Object>(0);
+ int key = Smi::cast(args[1])->value();
+ Token::Value op = static_cast<Token::Value>(Smi::cast(args[2])->value());
+ TRUnaryOpIC::TypeInfo previous_type =
+ static_cast<TRUnaryOpIC::TypeInfo>(Smi::cast(args[3])->value());
+
+ TRUnaryOpIC::TypeInfo type = TRUnaryOpIC::GetTypeInfo(operand);
+ type = TRUnaryOpIC::JoinTypes(type, previous_type);
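+  // Joining with the previously recorded type ensures the IC only moves
+  // towards more generic stubs and never flips back.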
+
+ Handle<Code> code = GetTypeRecordingUnaryOpStub(key, type);
+ if (!code.is_null()) {
+ if (FLAG_trace_ic) {
+ PrintF("[TypeRecordingUnaryOpIC (%s->%s)#%s]\n",
+ TRUnaryOpIC::GetName(previous_type),
+ TRUnaryOpIC::GetName(type),
+ Token::Name(op));
+ }
+ TRUnaryOpIC ic(isolate);
+ ic.patch(*code);
+ }
+
+ Handle<JSBuiltinsObject> builtins = Handle<JSBuiltinsObject>(
+ isolate->thread_local_top()->context_->builtins(), isolate);
+ Object* builtin = NULL; // Initialization calms down the compiler.
+ switch (op) {
+ case Token::SUB:
+ builtin = builtins->javascript_builtin(Builtins::UNARY_MINUS);
+ break;
+ case Token::BIT_NOT:
+ builtin = builtins->javascript_builtin(Builtins::BIT_NOT);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ Handle<JSFunction> builtin_function(JSFunction::cast(builtin), isolate);
+
+ bool caught_exception;
+ Handle<Object> result = Execution::Call(builtin_function, operand, 0, NULL,
+ &caught_exception);
+ if (caught_exception) {
+ return Failure::Exception();
+ }
+ return *result;
+}
+
// defined in code-stubs-<arch>.cc
// Only needed to remove dependency of ic.cc on code-stubs-<arch>.h.
Handle<Code> GetTypeRecordingBinaryOpStub(int key,
ICU(LoadPropertyWithInterceptorForCall) \
ICU(KeyedLoadPropertyWithInterceptor) \
ICU(StoreInterceptorProperty) \
+ ICU(TypeRecordingUnaryOp_Patch) \
ICU(TypeRecordingBinaryOp_Patch) \
ICU(CompareIC_Miss)
//
};
+class TRUnaryOpIC: public IC {
+ public:
+  // The type states, sorted from most specific to most generic (ignoring
+  // UNINITIALIZED).
+  // TODO(svenpanne) Using enums+switch is an antipattern, use a class instead.
+ enum TypeInfo {
+ UNINITIALIZED,
+ SMI,
+ HEAP_NUMBER,
+ GENERIC
+ };
+
+ explicit TRUnaryOpIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
+
+ void patch(Code* code);
+
+ static const char* GetName(TypeInfo type_info);
+
+ static State ToState(TypeInfo type_info);
+
+ static TypeInfo GetTypeInfo(Handle<Object> operand);
+
+ static TypeInfo JoinTypes(TypeInfo x, TypeInfo y);
+};
+
+
// Type Recording BinaryOpIC, that records the types of the inputs and outputs.
class TRBinaryOpIC: public IC {
public:
case Code::FUNCTION:
case Code::OPTIMIZED_FUNCTION:
return; // We log this later using LogCompiledFunctions.
+ case Code::TYPE_RECORDING_UNARY_OP_IC: // fall through
case Code::TYPE_RECORDING_BINARY_OP_IC: // fall through
case Code::COMPARE_IC: // fall through
case Code::STUB:
void Code::set_major_key(int major) {
ASSERT(kind() == STUB ||
+ kind() == TYPE_RECORDING_UNARY_OP_IC ||
kind() == TYPE_RECORDING_BINARY_OP_IC ||
kind() == COMPARE_IC);
ASSERT(0 <= major && major < 256);
}
+byte Code::type_recording_unary_op_type() {
+ ASSERT(is_type_recording_unary_op_stub());
+ return READ_BYTE_FIELD(this, kUnaryOpTypeOffset);
+}
+
+
+void Code::set_type_recording_unary_op_type(byte value) {
+ ASSERT(is_type_recording_unary_op_stub());
+ WRITE_BYTE_FIELD(this, kUnaryOpTypeOffset, value);
+}
+
+
byte Code::type_recording_binary_op_type() {
ASSERT(is_type_recording_binary_op_stub());
return READ_BYTE_FIELD(this, kBinaryOpTypeOffset);
case KEYED_EXTERNAL_ARRAY_STORE_IC: return "KEYED_EXTERNAL_ARRAY_STORE_IC";
case CALL_IC: return "CALL_IC";
case KEYED_CALL_IC: return "KEYED_CALL_IC";
+ case TYPE_RECORDING_UNARY_OP_IC: return "TYPE_RECORDING_UNARY_OP_IC";
case TYPE_RECORDING_BINARY_OP_IC: return "TYPE_RECORDING_BINARY_OP_IC";
case COMPARE_IC: return "COMPARE_IC";
}
STORE_IC,
KEYED_STORE_IC,
KEYED_EXTERNAL_ARRAY_STORE_IC,
+ TYPE_RECORDING_UNARY_OP_IC,
TYPE_RECORDING_BINARY_OP_IC,
COMPARE_IC,
// No more than 16 kinds. The value currently encoded in four bits in
inline bool is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
inline bool is_call_stub() { return kind() == CALL_IC; }
inline bool is_keyed_call_stub() { return kind() == KEYED_CALL_IC; }
+ inline bool is_type_recording_unary_op_stub() {
+ return kind() == TYPE_RECORDING_UNARY_OP_IC;
+ }
inline bool is_type_recording_binary_op_stub() {
return kind() == TYPE_RECORDING_BINARY_OP_IC;
}
inline ExternalArrayType external_array_type();
inline void set_external_array_type(ExternalArrayType value);
+ // [type-recording unary op type]: For all TYPE_RECORDING_UNARY_OP_IC.
+ inline byte type_recording_unary_op_type();
+ inline void set_type_recording_unary_op_type(byte value);
+
// [type-recording binary op type]: For all TYPE_RECORDING_BINARY_OP_IC.
inline byte type_recording_binary_op_type();
inline void set_type_recording_binary_op_type(byte value);
static const int kExternalArrayTypeOffset = kKindSpecificFlagsOffset;
static const int kCompareStateOffset = kStubMajorKeyOffset + 1;
+ static const int kUnaryOpTypeOffset = kStubMajorKeyOffset + 1;
static const int kBinaryOpTypeOffset = kStubMajorKeyOffset + 1;
static const int kHasDeoptimizationSupportOffset = kOptimizableOffset + 1;
x = NewCompareNode(cmp, x, y, position);
if (cmp != op) {
// The comparison was negated - add a NOT.
- x = new(zone()) UnaryOperation(Token::NOT, x);
+ x = new(zone()) UnaryOperation(Token::NOT, x, position);
}
} else {
Token::Value op = peek();
if (Token::IsUnaryOp(op)) {
op = Next();
+ int position = scanner().location().beg_pos;
Expression* expression = ParseUnaryExpression(CHECK_OK);
// Compute some expressions involving only number literals.
}
}
- return new(zone()) UnaryOperation(op, expression);
+ return new(zone()) UnaryOperation(op, expression, position);
} else if (Token::IsCountOp(op)) {
op = Next();
CASE(KEYED_EXTERNAL_ARRAY_STORE_IC);
CASE(CALL_IC);
CASE(KEYED_CALL_IC);
+ CASE(TYPE_RECORDING_UNARY_OP_IC);
CASE(TYPE_RECORDING_BINARY_OP_IC);
CASE(COMPARE_IC);
}
}
+TypeInfo TypeFeedbackOracle::UnaryType(UnaryOperation* expr) {
+ Handle<Object> object = GetInfo(expr->position());
+ TypeInfo unknown = TypeInfo::Unknown();
+ if (!object->IsCode()) return unknown;
+ Handle<Code> code = Handle<Code>::cast(object);
+ ASSERT(code->is_type_recording_unary_op_stub());
+ TRUnaryOpIC::TypeInfo type = static_cast<TRUnaryOpIC::TypeInfo>(
+ code->type_recording_unary_op_type());
+ switch (type) {
+ case TRUnaryOpIC::SMI:
+ return TypeInfo::Smi();
+ case TRUnaryOpIC::HEAP_NUMBER:
+ return TypeInfo::Double();
+ default:
+ return unknown;
+ }
+}
+
+
TypeInfo TypeFeedbackOracle::BinaryType(BinaryOperation* expr) {
Handle<Object> object = GetInfo(expr->id());
TypeInfo unknown = TypeInfo::Unknown();
// Forward declarations.
class Assignment;
+class UnaryOperation;
class BinaryOperation;
class Call;
class CompareOperation;
bool LoadIsBuiltin(Property* expr, Builtins::Name id);
// Get type information for arithmetic operations and compares.
+ TypeInfo UnaryType(UnaryOperation* expr);
TypeInfo BinaryType(BinaryOperation* expr);
TypeInfo CompareType(CompareOperation* expr);
TypeInfo SwitchType(CaseClause* clause);
};
+// Get the integer part of a heap number.
+// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
+void IntegerConvert(MacroAssembler* masm,
+ Register result,
+ Register source) {
+ // Result may be rcx. If result and source are the same register, source will
+ // be overwritten.
+ ASSERT(!result.is(rdi) && !result.is(rbx));
+ // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
+ // cvttsd2si (32-bit version) directly.
+ Register double_exponent = rbx;
+ Register double_value = rdi;
+ NearLabel done, exponent_63_plus;
+ // Get double and extract exponent.
+ __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
+ // Clear result preemptively, in case we need to return zero.
+ __ xorl(result, result);
+ __ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there.
+  // Double to remove sign bit, shift exponent down to least significant bits,
+  // and subtract bias to get the unshifted, unbiased exponent.
+ __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
+ __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
+ __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
+ // Check whether the exponent is too big for a 63 bit unsigned integer.
+ __ cmpl(double_exponent, Immediate(63));
+ __ j(above_equal, &exponent_63_plus);
+ // Handle exponent range 0..62.
+ __ cvttsd2siq(result, xmm0);
+ __ jmp(&done);
+
+ __ bind(&exponent_63_plus);
+ // Exponent negative or 63+.
+ __ cmpl(double_exponent, Immediate(83));
+ // If exponent negative or above 83, number contains no significant bits in
+  // the range 0..2^31, so the result is zero, and 'result' already holds zero.
+ __ j(above, &done);
+
+  // Exponent in range 63..83.
+ // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
+ // the least significant exponent-52 bits.
+
+ // Negate low bits of mantissa if value is negative.
+ __ addq(double_value, double_value); // Move sign bit to carry.
+ __ sbbl(result, result); // And convert carry to -1 in result register.
+  // If the value is negative, compute (double_value-1)^-1; otherwise
+  // compute (double_value-0)^0.
+ __ addl(double_value, result);
+ // Do xor in opposite directions depending on where we want the result
+ // (depending on whether result is rcx or not).
+
+ if (result.is(rcx)) {
+ __ xorl(double_value, result);
+ // Left shift mantissa by (exponent - mantissabits - 1) to save the
+ // bits that have positional values below 2^32 (the extra -1 comes from the
+ // doubling done above to move the sign bit into the carry flag).
+ __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
+ __ shll_cl(double_value);
+ __ movl(result, double_value);
+ } else {
+ // As the then-branch, but move double-value to result before shifting.
+ __ xorl(result, double_value);
+ __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
+ __ shll_cl(result);
+ }
+
+ __ bind(&done);
+}
+
+
+Handle<Code> GetTypeRecordingUnaryOpStub(int key,
+ TRUnaryOpIC::TypeInfo type_info) {
+ TypeRecordingUnaryOpStub stub(key, type_info);
+ return stub.GetCode();
+}
+
+
+void TypeRecordingUnaryOpStub::Generate(MacroAssembler* masm) {
+ switch (operand_type_) {
+ case TRUnaryOpIC::UNINITIALIZED:
+ GenerateTypeTransition(masm);
+ break;
+ case TRUnaryOpIC::SMI:
+ GenerateSmiStub(masm);
+ break;
+ case TRUnaryOpIC::HEAP_NUMBER:
+ GenerateHeapNumberStub(masm);
+ break;
+ case TRUnaryOpIC::GENERIC:
+ GenerateGenericStub(masm);
+ break;
+ }
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ __ pop(rcx); // Save return address.
+ __ push(rax);
+  // The argument is now on top of the stack.
+ // Push this stub's key. Although the operation and the type info are
+ // encoded into the key, the encoding is opaque, so push them too.
+ __ Push(Smi::FromInt(MinorKey()));
+ __ Push(Smi::FromInt(op_));
+ __ Push(Smi::FromInt(operand_type_));
+
+ __ push(rcx); // Push return address.
+
+ // Patch the caller to an appropriate specialized stub and return the
+ // operation result to the caller of the stub.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kTypeRecordingUnaryOp_Patch),
+ masm->isolate()),
+ 4,
+ 1);
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void TypeRecordingUnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+ switch (op_) {
+ case Token::SUB:
+ GenerateSmiStubSub(masm);
+ break;
+ case Token::BIT_NOT:
+ GenerateSmiStubBitNot(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
+ NearLabel non_smi;
+ Label slow;
+ GenerateSmiCodeSub(masm, &non_smi, &slow);
+ __ bind(&non_smi);
+ __ bind(&slow);
+ GenerateTypeTransition(masm);
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
+ NearLabel non_smi;
+ GenerateSmiCodeBitNot(masm, &non_smi);
+ __ bind(&non_smi);
+ GenerateTypeTransition(masm);
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
+ NearLabel* non_smi,
+ Label* slow) {
+ NearLabel done;
+ __ JumpIfNotSmi(rax, non_smi);
+ __ SmiNeg(rax, rax, &done);
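+  // SmiNeg jumps to 'done' when the negated value is a valid smi and falls
+  // through otherwise (for zero and the minimal smi value).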
+ __ jmp(slow);
+ __ bind(&done);
+ __ ret(0);
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
+ NearLabel* non_smi) {
+ __ JumpIfNotSmi(rax, non_smi);
+ __ SmiNot(rax, rax);
+ __ ret(0);
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void TypeRecordingUnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+ switch (op_) {
+ case Token::SUB:
+ GenerateHeapNumberStubSub(masm);
+ break;
+ case Token::BIT_NOT:
+ GenerateHeapNumberStubBitNot(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
+ NearLabel non_smi;
+ Label slow;
+ GenerateSmiCodeSub(masm, &non_smi, &slow);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeSub(masm, &slow);
+ __ bind(&slow);
+ GenerateTypeTransition(masm);
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateHeapNumberStubBitNot(
+ MacroAssembler* masm) {
+ NearLabel non_smi;
+ Label slow;
+ GenerateSmiCodeBitNot(masm, &non_smi);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeBitNot(masm, &slow);
+ __ bind(&slow);
+ GenerateTypeTransition(masm);
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
+ Label* slow) {
+ // Check if the operand is a heap number.
+ __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, slow);
+
+ // Operand is a float, negate its value by flipping sign bit.
+ __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
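+ // Build a mask with only the sign bit (bit 63) of the double set.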
+ __ Set(kScratchRegister, 0x01);
+ __ shl(kScratchRegister, Immediate(63));
+ __ xor_(rdx, kScratchRegister); // Flip sign.
+ // rdx is value to store.
+ if (mode_ == UNARY_OVERWRITE) {
+ __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
+ } else {
+ Label slow_allocate_heapnumber, heapnumber_allocated;
+ __ AllocateHeapNumber(rcx, rbx, &slow_allocate_heapnumber);
+ __ jmp(&heapnumber_allocated);
+
+ __ bind(&slow_allocate_heapnumber);
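+ // Inline allocation failed: allocate through the runtime, preserving
+ // the flipped value in rdx across the call.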
+ __ EnterInternalFrame();
+ __ push(rdx);
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ movq(rcx, rax);
+ __ pop(rdx);
+ __ LeaveInternalFrame();
+
+ __ bind(&heapnumber_allocated);
+ // rcx: allocated 'empty' number
+ __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
+ __ movq(rax, rcx);
+ }
+ __ ret(0);
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateHeapNumberCodeBitNot(
+ MacroAssembler* masm,
+ Label* slow) {
+ // Check if the operand is a heap number.
+ __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, slow);
+
+ // Convert the heap number in rax to an untagged integer in rax.
+ IntegerConvert(masm, rax, rax);
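+ // The 32-bit complement computed below always fits in a smi, since smis
+ // on x64 carry 32 bits of payload.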
+
+ // Do the bitwise operation and smi tag the result.
+ __ notl(rax);
+ __ Integer32ToSmi(rax, rax);
+ __ ret(0);
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void TypeRecordingUnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
+ switch (op_) {
+ case Token::SUB:
+ GenerateGenericStubSub(masm);
+ break;
+ case Token::BIT_NOT:
+ GenerateGenericStubBitNot(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
+ NearLabel non_smi;
+ Label slow;
+ GenerateSmiCodeSub(masm, &non_smi, &slow);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeSub(masm, &slow);
+ __ bind(&slow);
+ GenerateGenericCodeFallback(masm);
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
+ NearLabel non_smi;
+ Label slow;
+ GenerateSmiCodeBitNot(masm, &non_smi);
+ __ bind(&non_smi);
+ GenerateHeapNumberCodeBitNot(masm, &slow);
+ __ bind(&slow);
+ GenerateGenericCodeFallback(masm);
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateGenericCodeFallback(
+ MacroAssembler* masm) {
+ // Handle the slow case by jumping to the JavaScript builtin.
+ __ pop(rcx); // pop return address
+ __ push(rax);
+ __ push(rcx); // push return address
+ switch (op_) {
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+ break;
+ case Token::BIT_NOT:
+ __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+const char* TypeRecordingUnaryOpStub::GetName() {
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+ kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "TypeRecordingUnaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ TRUnaryOpIC::GetName(operand_type_));
+ return name_;
+}
+
+
Handle<Code> GetTypeRecordingBinaryOpStub(int key,
TRBinaryOpIC::TypeInfo type_info,
TRBinaryOpIC::TypeInfo result_type_info) {
}
-// Get the integer part of a heap number.
-// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
-void IntegerConvert(MacroAssembler* masm,
- Register result,
- Register source) {
- // Result may be rcx. If result and source are the same register, source will
- // be overwritten.
- ASSERT(!result.is(rdi) && !result.is(rbx));
- // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
- // cvttsd2si (32-bit version) directly.
- Register double_exponent = rbx;
- Register double_value = rdi;
- NearLabel done, exponent_63_plus;
- // Get double and extract exponent.
- __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
- // Clear result preemptively, in case we need to return zero.
- __ xorl(result, result);
- __ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there.
- // Double to remove sign bit, shift exponent down to least significant bits.
- // and subtract bias to get the unshifted, unbiased exponent.
- __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
- __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
- __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
- // Check whether the exponent is too big for a 63 bit unsigned integer.
- __ cmpl(double_exponent, Immediate(63));
- __ j(above_equal, &exponent_63_plus);
- // Handle exponent range 0..62.
- __ cvttsd2siq(result, xmm0);
- __ jmp(&done);
-
- __ bind(&exponent_63_plus);
- // Exponent negative or 63+.
- __ cmpl(double_exponent, Immediate(83));
- // If exponent negative or above 83, number contains no significant bits in
- // the range 0..2^31, so result is zero, and rcx already holds zero.
- __ j(above, &done);
-
- // Exponent in rage 63..83.
- // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
- // the least significant exponent-52 bits.
-
- // Negate low bits of mantissa if value is negative.
- __ addq(double_value, double_value); // Move sign bit to carry.
- __ sbbl(result, result); // And convert carry to -1 in result register.
- // if scratch2 is negative, do (scratch2-1)^-1, otherwise (scratch2-0)^0.
- __ addl(double_value, result);
- // Do xor in opposite directions depending on where we want the result
- // (depending on whether result is rcx or not).
-
- if (result.is(rcx)) {
- __ xorl(double_value, result);
- // Left shift mantissa by (exponent - mantissabits - 1) to save the
- // bits that have positional values below 2^32 (the extra -1 comes from the
- // doubling done above to move the sign bit into the carry flag).
- __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
- __ shll_cl(double_value);
- __ movl(result, double_value);
- } else {
- // As the then-branch, but move double-value to result before shifting.
- __ xorl(result, double_value);
- __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
- __ shll_cl(result);
- }
-
- __ bind(&done);
-}
-
-
// Input: rdx, rax are the left and right objects of a bit op.
// Output: rax, rcx are left and right integers for a bit op.
void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
};
+class TypeRecordingUnaryOpStub: public CodeStub {
+ public:
+ TypeRecordingUnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
+ : op_(op),
+ mode_(mode),
+ operand_type_(TRUnaryOpIC::UNINITIALIZED),
+ name_(NULL) {
+ }
+
+ TypeRecordingUnaryOpStub(
+ int key,
+ TRUnaryOpIC::TypeInfo operand_type)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ operand_type_(operand_type),
+ name_(NULL) {
+ }
+
+ private:
+ Token::Value op_;
+ UnaryOverwriteMode mode_;
+
+ // Operand type information determined at runtime.
+ TRUnaryOpIC::TypeInfo operand_type_;
+
+ char* name_;
+
+ const char* GetName();
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("TypeRecordingUnaryOpStub %d (op %s), "
+ "(mode %d, runtime_type_info %s)\n",
+ MinorKey(),
+ Token::String(op_),
+ static_cast<int>(mode_),
+ TRUnaryOpIC::GetName(operand_type_));
+ }
+#endif
+
+ class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
+ class OpBits: public BitField<Token::Value, 1, 7> {};
+ class OperandTypeInfoBits: public BitField<TRUnaryOpIC::TypeInfo, 8, 3> {};
+
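+ // Minor key layout: bit 0 holds the overwrite mode, bits 1-7 the
+ // operation token, and bits 8-10 the recorded operand type.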
+ Major MajorKey() { return TypeRecordingUnaryOp; }
+ int MinorKey() {
+ return ModeBits::encode(mode_)
+ | OpBits::encode(op_)
+ | OperandTypeInfoBits::encode(operand_type_);
+ }
+
+ // Note: Many of the helper functions below will vanish once we use
+ // virtual functions instead of switches more often.
+ void Generate(MacroAssembler* masm);
+
+ void GenerateTypeTransition(MacroAssembler* masm);
+
+ void GenerateSmiStub(MacroAssembler* masm);
+ void GenerateSmiStubSub(MacroAssembler* masm);
+ void GenerateSmiStubBitNot(MacroAssembler* masm);
+ void GenerateSmiCodeSub(MacroAssembler* masm, NearLabel* non_smi,
+ Label* slow);
+ void GenerateSmiCodeBitNot(MacroAssembler* masm, NearLabel* non_smi);
+
+ void GenerateHeapNumberStub(MacroAssembler* masm);
+ void GenerateHeapNumberStubSub(MacroAssembler* masm);
+ void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
+ void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
+ void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
+
+ void GenerateGenericStub(MacroAssembler* masm);
+ void GenerateGenericStubSub(MacroAssembler* masm);
+ void GenerateGenericStubBitNot(MacroAssembler* masm);
+ void GenerateGenericCodeFallback(MacroAssembler* masm);
+
+ virtual int GetCodeKind() { return Code::TYPE_RECORDING_UNARY_OP_IC; }
+
+ virtual InlineCacheState GetICState() {
+ return TRUnaryOpIC::ToState(operand_type_);
+ }
+
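+ // Store the recorded operand type in the generated code object so the
+ // IC can read it back later.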
+ virtual void FinishCode(Code* code) {
+ code->set_type_recording_unary_op_type(operand_type_);
+ }
+};
+
+
class TypeRecordingBinaryOpStub: public CodeStub {
public:
TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
break;
}
- case Token::SUB: {
- Comment cmt(masm_, "[ UnaryOperation (SUB)");
- bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS);
- // GenericUnaryOpStub expects the argument to be in the
- // accumulator register rax.
- VisitForAccumulatorValue(expr->expression());
- __ CallStub(&stub);
- context()->Plug(rax);
+ case Token::SUB:
+ EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
break;
- }
- case Token::BIT_NOT: {
- Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)");
- // The generic unary operation stub expects the argument to be
- // in the accumulator register rax.
- VisitForAccumulatorValue(expr->expression());
- Label done;
- bool inline_smi_case = ShouldInlineSmiCase(expr->op());
- if (inline_smi_case) {
- Label call_stub;
- __ JumpIfNotSmi(rax, &call_stub);
- __ SmiNot(rax, rax);
- __ jmp(&done);
- __ bind(&call_stub);
- }
- bool overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode mode =
- overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- UnaryOpFlags flags = inline_smi_case
- ? NO_UNARY_SMI_CODE_IN_STUB
- : NO_UNARY_FLAGS;
- GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags);
- __ CallStub(&stub);
- __ bind(&done);
- context()->Plug(rax);
+ case Token::BIT_NOT:
+ EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
break;
- }
default:
UNREACHABLE();
}
+void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
+ const char* comment) {
+ // TODO(svenpanne): Allowing format strings in Comment would be nice here...
+ Comment cmt(masm_, comment);
+ bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
+ UnaryOverwriteMode overwrite =
+ can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
+ TypeRecordingUnaryOpStub stub(expr->op(), overwrite);
+ // TypeRecordingUnaryOpStub expects the argument to be in the
+ // accumulator register rax.
+ VisitForAccumulatorValue(expr->expression());
+ SetSourcePosition(expr->position());
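+ // Call through the IC machinery rather than a plain stub call so this
+ // site can be patched when the stub transitions to a new operand type.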
+ EmitCallIC(stub.GetCode(), NULL, expr->id());
+ context()->Plug(rax);
+}
+
+
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());