}
-void BinaryOpStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { r1, r0 };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
-}
-
-
static void InitializeArrayConstructorDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor,
}
+// Generates code to call a C function to do a double operation.
+// This code never falls through; it returns with the result in r0 as a
+// heap number.
+// Register heap_number_result must be a heap number in which the
+// result of the operation will be stored.
+// Requires the following layout on entry:
+// d0: Left value.
+// d1: Right value.
+// With the soft-float ABI, r0, r1, r2 and r3 are used as well.
+static void CallCCodeForDoubleOperation(MacroAssembler* masm,
+ Token::Value op,
+ Register heap_number_result,
+ Register scratch) {
+ // Assert that heap_number_result is callee-saved.
+ // We currently always use r5 to pass it.
+ ASSERT(heap_number_result.is(r5));
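+ // r5 is callee-saved in the ARM calling convention, so it survives the
+ // C call below.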
+
+ // Push the current return address before the C call. Return will be
+ // through pop(pc) below.
+ __ push(lr);
+ __ PrepareCallCFunction(0, 2, scratch);
+ if (!masm->use_eabi_hardfloat()) {
+ __ vmov(r0, r1, d0);
+ __ vmov(r2, r3, d1);
+ }
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(
+ ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
+ }
+ // Store the answer in the overwritable heap number. The double result is
+ // returned in registers r0 and r1, or in d0.
+ if (masm->use_eabi_hardfloat()) {
+ __ vstr(d0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
+ } else {
+ __ Strd(r0, r1,
+ FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
+ }
+ // Place heap_number_result in r0 and return to the pushed return address.
+ __ mov(r0, Operand(heap_number_result));
+ __ pop(pc);
+}
+
+
+void BinaryOpStub::Initialize() {
+ platform_specific_bit_ = true; // VFP2 is a base requirement for V8.
+}
+
+
+void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ Label get_result;
+
+ __ Push(r1, r0);
+
+ __ mov(r2, Operand(Smi::FromInt(MinorKey())));
+ __ push(r2);
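+ // The pushed operands together with this stub's minor key give
+ // BinaryOp_Patch the information it needs to pick the new stub state.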
+
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
+ masm->isolate()),
+ 3,
+ 1);
+}
+
+
+void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
+ MacroAssembler* masm) {
+ UNIMPLEMENTED();
+}
+
+
+void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
+ Token::Value op,
+ Register scratch1,
+ Register scratch2) {
+ Register left = r1;
+ Register right = r0;
+
+ ASSERT(right.is(r0));
+ ASSERT(!AreAliased(left, right, scratch1, scratch2, ip));
+ STATIC_ASSERT(kSmiTag == 0);
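+ // With kSmiTag == 0 and a one-bit tag, a tagged smi is just the value
+ // doubled, so tagged operands can be added and subtracted directly.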
+
+ Label not_smi_result;
+ switch (op) {
+ case Token::ADD:
+ __ add(right, left, Operand(right), SetCC); // Add optimistically.
+ __ Ret(vc);
+ __ sub(right, right, Operand(left)); // Revert optimistic add.
+ break;
+ case Token::SUB:
+ __ sub(right, left, Operand(right), SetCC); // Subtract optimistically.
+ __ Ret(vc);
+ __ sub(right, left, Operand(right)); // Revert optimistic subtract.
+ break;
+ case Token::MUL:
+ // Remove tag from one of the operands. This way the multiplication result
+ // will be a smi if it fits the smi range.
+ __ SmiUntag(ip, right);
+ // Do the multiplication:
+ // scratch1 = lower 32 bits of ip * left.
+ // scratch2 = higher 32 bits of ip * left.
+ __ smull(scratch1, scratch2, left, ip);
+ // Check for overflowing the smi range - no overflow if higher 33 bits of
+ // the result are identical.
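+ // (scratch1 ASR 31) replicates the sign bit of the low word; it equals
+ // scratch2 exactly when the 64-bit product sign-extends from 32 bits.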
+ __ mov(ip, Operand(scratch1, ASR, 31));
+ __ cmp(ip, Operand(scratch2));
+ __ b(ne, &not_smi_result);
+ // Go slow on zero result to handle -0.
+ __ cmp(scratch1, Operand::Zero());
+ __ mov(right, Operand(scratch1), LeaveCC, ne);
+ __ Ret(ne);
+ // The result is zero; we must produce -0 if the non-zero operand was
+ // negative. We know one of the operands was zero.
+ __ add(scratch2, right, Operand(left), SetCC);
+ __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
+ __ Ret(pl); // Return smi 0 if the non-zero one was positive.
+ // We fall through here if we multiplied a negative number with 0, because
+ // that would mean we should produce -0.
+ break;
+ case Token::DIV: {
+ Label div_with_sdiv;
+
+ // Check for 0 divisor.
+ __ cmp(right, Operand::Zero());
+ __ b(eq, &not_smi_result);
+
+ // Check for power of two on the right hand side.
+ __ sub(scratch1, right, Operand(1));
+ __ tst(scratch1, right);
+ if (CpuFeatures::IsSupported(SUDIV)) {
+ __ b(ne, &div_with_sdiv);
+ // Check for no remainder.
+ __ tst(left, scratch1);
+ __ b(ne, &not_smi_result);
+ // Check for positive left hand side.
+ __ cmp(left, Operand::Zero());
+ __ b(mi, &div_with_sdiv);
+ } else {
+ __ b(ne, &not_smi_result);
+ // Check for positive and no remainder.
+ __ orr(scratch2, scratch1, Operand(0x80000000u));
+ __ tst(left, scratch2);
+ __ b(ne, &not_smi_result);
+ }
+
+ // Perform division by shifting.
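+ // scratch1 holds right - 1, so 31 - clz(scratch1) is the logarithm of
+ // the untagged divisor; shifting the tagged dividend by that amount
+ // yields the tagged quotient directly.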
+ __ clz(scratch1, scratch1);
+ __ rsb(scratch1, scratch1, Operand(31));
+ __ mov(right, Operand(left, LSR, scratch1));
+ __ Ret();
+
+ if (CpuFeatures::IsSupported(SUDIV)) {
+ CpuFeatureScope scope(masm, SUDIV);
+ Label result_not_zero;
+
+ __ bind(&div_with_sdiv);
+ // Do division.
+ __ sdiv(scratch1, left, right);
+ // Check that the remainder is zero.
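+ // mls computes scratch2 = left - scratch1 * right, i.e. the remainder.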
+ __ mls(scratch2, scratch1, right, left);
+ __ cmp(scratch2, Operand::Zero());
+ __ b(ne, &not_smi_result);
+ // Check for negative zero result.
+ __ cmp(scratch1, Operand::Zero());
+ __ b(ne, &result_not_zero);
+ __ cmp(right, Operand::Zero());
+ __ b(lt, &not_smi_result);
+ __ bind(&result_not_zero);
+ // Check for the corner case of dividing the most negative smi by -1.
+ __ cmp(scratch1, Operand(0x40000000));
+ __ b(eq, &not_smi_result);
+ // Tag and return the result.
+ __ SmiTag(right, scratch1);
+ __ Ret();
+ }
+ break;
+ }
+ case Token::MOD: {
+ Label modulo_with_sdiv;
+
+ if (CpuFeatures::IsSupported(SUDIV)) {
+ // Check for x % 0.
+ __ cmp(right, Operand::Zero());
+ __ b(eq, &not_smi_result);
+
+ // Check for two positive smis.
+ __ orr(scratch1, left, Operand(right));
+ __ tst(scratch1, Operand(0x80000000u));
+ __ b(ne, &modulo_with_sdiv);
+
+ // Check for power of two on the right hand side.
+ __ sub(scratch1, right, Operand(1));
+ __ tst(scratch1, right);
+ __ b(ne, &modulo_with_sdiv);
+ } else {
+ // Check for two positive smis.
+ __ orr(scratch1, left, Operand(right));
+ __ tst(scratch1, Operand(0x80000000u));
+ __ b(ne, &not_smi_result);
+
+ // Check for power of two on the right hand side.
+ __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
+ }
+
+ // Perform modulus by masking (scratch1 contains right - 1).
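+ // Both operands are known to be positive smis and the divisor is a
+ // power of two, so the bitwise AND yields the tagged remainder directly.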
+ __ and_(right, left, Operand(scratch1));
+ __ Ret();
+
+ if (CpuFeatures::IsSupported(SUDIV)) {
+ CpuFeatureScope scope(masm, SUDIV);
+ __ bind(&modulo_with_sdiv);
+ __ mov(scratch2, right);
+ // Perform modulus with sdiv and mls.
+ __ sdiv(scratch1, left, right);
+ __ mls(right, scratch1, right, left);
+ // Return if the result is not 0.
+ __ cmp(right, Operand::Zero());
+ __ Ret(ne);
+ // The result is 0; check for the -0 case.
+ __ cmp(left, Operand::Zero());
+ __ Ret(pl);
+ // This is the -0 case; restore the value of right.
+ __ mov(right, scratch2);
+ // We fall through here to not_smi_result to produce -0.
+ }
+ break;
+ }
+ case Token::BIT_OR:
+ __ orr(right, left, Operand(right));
+ __ Ret();
+ break;
+ case Token::BIT_AND:
+ __ and_(right, left, Operand(right));
+ __ Ret();
+ break;
+ case Token::BIT_XOR:
+ __ eor(right, left, Operand(right));
+ __ Ret();
+ break;
+ case Token::SAR:
+ // Remove tags from right operand.
+ __ GetLeastBitsFromSmi(scratch1, right, 5);
+ __ mov(right, Operand(left, ASR, scratch1));
+ // Smi tag result.
+ __ bic(right, right, Operand(kSmiTagMask));
+ __ Ret();
+ break;
+ case Token::SHR:
+ // Remove tags from operands. We can't do this on a 31 bit number
+ // because then the 0s get shifted into bit 30 instead of bit 31.
+ __ SmiUntag(scratch1, left);
+ __ GetLeastBitsFromSmi(scratch2, right, 5);
+ __ mov(scratch1, Operand(scratch1, LSR, scratch2));
+ // Unsigned shift is not allowed to produce a negative number, so
+ // check the sign bit and the sign bit after Smi tagging.
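+ // Testing 0xc0000000 covers bit 31 (the sign of the untagged value) and
+ // bit 30 (which becomes the sign bit after smi tagging).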
+ __ tst(scratch1, Operand(0xc0000000));
+ __ b(ne, &not_smi_result);
+ // Smi tag result.
+ __ SmiTag(right, scratch1);
+ __ Ret();
+ break;
+ case Token::SHL:
+ // Remove tags from operands.
+ __ SmiUntag(scratch1, left);
+ __ GetLeastBitsFromSmi(scratch2, right, 5);
+ __ mov(scratch1, Operand(scratch1, LSL, scratch2));
+ // Check that the signed result fits in a Smi.
+ __ TrySmiTag(right, scratch1, &not_smi_result);
+ __ Ret();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ __ bind(&not_smi_result);
+}
+
+
+void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ OverwriteMode mode);
+
+
+void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
+ BinaryOpIC::TypeInfo left_type,
+ BinaryOpIC::TypeInfo right_type,
+ bool smi_operands,
+ Label* not_numbers,
+ Label* gc_required,
+ Label* miss,
+ Token::Value op,
+ OverwriteMode mode,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ Register left = r1;
+ Register right = r0;
+ Register result = scratch3;
+ ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
+
+ ASSERT(smi_operands || (not_numbers != NULL));
+ if (smi_operands) {
+ __ AssertSmi(left);
+ __ AssertSmi(right);
+ }
+ if (left_type == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(left, miss);
+ }
+ if (right_type == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(right, miss);
+ }
+
+ Register heap_number_map = scratch4;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ switch (op) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD: {
+ // Allocate new heap number for result.
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
+
+ // Load left and right operands into d0 and d1.
+ if (smi_operands) {
+ __ SmiToDouble(d1, right);
+ __ SmiToDouble(d0, left);
+ } else {
+ // Load right operand into d1.
+ if (right_type == BinaryOpIC::INT32) {
+ __ LoadNumberAsInt32Double(
+ right, d1, heap_number_map, scratch1, d8, miss);
+ } else {
+ Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
+ __ LoadNumber(right, d1, heap_number_map, scratch1, fail);
+ }
+ // Load left operand into d0.
+ if (left_type == BinaryOpIC::INT32) {
+ __ LoadNumberAsInt32Double(
+ left, d0, heap_number_map, scratch1, d8, miss);
+ } else {
+ Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
+ __ LoadNumber(
+ left, d0, heap_number_map, scratch1, fail);
+ }
+ }
+
+ // Calculate the result.
+ if (op != Token::MOD) {
+ // Using VFP registers:
+ // d0: Left value
+ // d1: Right value
+ switch (op) {
+ case Token::ADD:
+ __ vadd(d5, d0, d1);
+ break;
+ case Token::SUB:
+ __ vsub(d5, d0, d1);
+ break;
+ case Token::MUL:
+ __ vmul(d5, d0, d1);
+ break;
+ case Token::DIV:
+ __ vdiv(d5, d0, d1);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
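+ // vstr needs an untagged address, so strip the heap-object tag for the
+ // store and restore it afterwards.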
+ __ sub(r0, result, Operand(kHeapObjectTag));
+ __ vstr(d5, r0, HeapNumber::kValueOffset);
+ __ add(r0, r0, Operand(kHeapObjectTag));
+ __ Ret();
+ } else {
+ // Call the C function to handle the double operation.
+ CallCCodeForDoubleOperation(masm, op, result, scratch1);
+ if (FLAG_debug_code) {
+ __ stop("Unreachable code.");
+ }
+ }
+ break;
+ }
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ case Token::SAR:
+ case Token::SHR:
+ case Token::SHL: {
+ if (smi_operands) {
+ __ SmiUntag(r3, left);
+ __ SmiUntag(r2, right);
+ } else {
+ // Convert operands to 32-bit integers. Right in r2 and left in r3.
+ __ TruncateNumberToI(left, r3, heap_number_map, scratch1, not_numbers);
+ __ TruncateNumberToI(right, r2, heap_number_map, scratch1, not_numbers);
+ }
+
+ Label result_not_a_smi;
+ switch (op) {
+ case Token::BIT_OR:
+ __ orr(r2, r3, Operand(r2));
+ break;
+ case Token::BIT_XOR:
+ __ eor(r2, r3, Operand(r2));
+ break;
+ case Token::BIT_AND:
+ __ and_(r2, r3, Operand(r2));
+ break;
+ case Token::SAR:
+ // Use only the 5 least significant bits of the shift count.
+ __ GetLeastBitsFromInt32(r2, r2, 5);
+ __ mov(r2, Operand(r3, ASR, r2));
+ break;
+ case Token::SHR:
+ // Use only the 5 least significant bits of the shift count.
+ __ GetLeastBitsFromInt32(r2, r2, 5);
+ __ mov(r2, Operand(r3, LSR, r2), SetCC);
+ // SHR is special because it is required to produce a positive answer.
+ // The code below for writing into heap numbers isn't capable of
+ // writing the register as an unsigned int so we go to slow case if we
+ // hit this case.
+ __ b(mi, &result_not_a_smi);
+ break;
+ case Token::SHL:
+ // Use only the 5 least significant bits of the shift count.
+ __ GetLeastBitsFromInt32(r2, r2, 5);
+ __ mov(r2, Operand(r3, LSL, r2));
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ // Check that the *signed* result fits in a smi.
+ __ TrySmiTag(r0, r2, &result_not_a_smi);
+ __ Ret();
+
+ // Allocate new heap number for result.
+ __ bind(&result_not_a_smi);
+ if (smi_operands) {
+ __ AllocateHeapNumber(
+ result, scratch1, scratch2, heap_number_map, gc_required);
+ } else {
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required,
+ mode);
+ }
+
+ // r2: Answer as signed int32.
+ // result: Heap number to write answer into.
+
+ // Nothing can go wrong now, so move the heap number to r0, which is the
+ // result.
+ __ mov(r0, Operand(result));
+
+ // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
+ // mentioned above, SHR must always produce a positive result.
+ __ vmov(s0, r2);
+ if (op == Token::SHR) {
+ __ vcvt_f64_u32(d0, s0);
+ } else {
+ __ vcvt_f64_s32(d0, s0);
+ }
+ __ sub(r3, r0, Operand(kHeapObjectTag));
+ __ vstr(d0, r3, HeapNumber::kValueOffset);
+ __ Ret();
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+// Generate the smi code. If the operation on smis is successful this return
+// is generated. If the result is not a smi and heap number allocation is not
+// requested the code falls through. If number allocation is requested but a
+// heap number cannot be allocated the code jumps to the label gc_required.
+void BinaryOpStub_GenerateSmiCode(
+ MacroAssembler* masm,
+ Label* use_runtime,
+ Label* gc_required,
+ Token::Value op,
+ BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
+ OverwriteMode mode,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ Label not_smis;
+
+ Register left = r1;
+ Register right = r0;
+ ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
+
+ // Perform combined smi check on both operands.
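+ // The OR of two tagged values has a zero tag bit only if both tag bits
+ // are zero, so one smi test covers both operands.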
+ __ orr(scratch1, left, Operand(right));
+ __ JumpIfNotSmi(scratch1, &not_smis);
+
+ // If the smi-smi operation results in a smi, a return is generated.
+ BinaryOpStub_GenerateSmiSmiOperation(masm, op, scratch1, scratch2);
+
+ // If heap number results are possible, generate the result in an
+ // allocated heap number.
+ if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
+ BinaryOpStub_GenerateFPOperation(
+ masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
+ use_runtime, gc_required, &not_smis, op, mode, scratch2, scratch3,
+ scratch1, scratch4);
+ }
+ __ bind(&not_smis);
+}
+
+
+void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+ Label right_arg_changed, call_runtime;
+
+ if (op_ == Token::MOD && encoded_right_arg_.has_value) {
+ // It is guaranteed that the value will fit into a Smi, because if it
+ // didn't, we wouldn't be here; see BinaryOp_Patch.
+ __ cmp(r0, Operand(Smi::FromInt(fixed_right_arg_value())));
+ __ b(ne, &right_arg_changed);
+ }
+
+ if (result_type_ == BinaryOpIC::UNINITIALIZED ||
+ result_type_ == BinaryOpIC::SMI) {
+ // Only allow smi results.
+ BinaryOpStub_GenerateSmiCode(masm, &call_runtime, NULL, op_,
+ NO_HEAPNUMBER_RESULTS, mode_, r5, r6, r4, r9);
+ } else {
+ // Allow heap number result and don't make a transition if a heap number
+ // cannot be allocated.
+ BinaryOpStub_GenerateSmiCode(masm, &call_runtime, &call_runtime, op_,
+ ALLOW_HEAPNUMBER_RESULTS, mode_, r5, r6, r4, r9);
+ }
+
+ // Code falls through if the result is not returned as either a smi or heap
+ // number.
+ __ bind(&right_arg_changed);
+ GenerateTypeTransition(masm);
+
+ __ bind(&call_runtime);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
+ }
+ __ Ret();
+}
+
+
+void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
+ Label call_runtime;
+ ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
+ ASSERT(op_ == Token::ADD);
+ // If both arguments are strings, call the string add stub.
+ // Otherwise, do a transition.
+
+ // Registers containing left and right operands respectively.
+ Register left = r1;
+ Register right = r0;
+
+ // Test if left operand is a string.
+ __ JumpIfSmi(left, &call_runtime);
+ __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &call_runtime);
+
+ // Test if right operand is a string.
+ __ JumpIfSmi(right, &call_runtime);
+ __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &call_runtime);
+
+ StringAddStub string_add_stub(
+ (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_stub);
+
+ __ bind(&call_runtime);
+ GenerateTypeTransition(masm);
+}
+
+
+void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+ ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
+
+ Register left = r1;
+ Register right = r0;
+ Register scratch1 = r4;
+ Register scratch2 = r9;
+ Register scratch3 = r5;
+ LowDwVfpRegister double_scratch = d0;
+
+ Register heap_number_result = no_reg;
+ Register heap_number_map = r6;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ Label call_runtime;
+ // Labels for type transition, used for wrong input or output types.
+ // Both labels are currently bound to the same position. We use two
+ // different labels to differentiate the causes leading to a type transition.
+ Label transition;
+
+ // Smi-smi fast case.
+ Label skip;
+ __ orr(scratch1, left, right);
+ __ JumpIfNotSmi(scratch1, &skip);
+ BinaryOpStub_GenerateSmiSmiOperation(masm, op_, scratch2, scratch3);
+ // Fall through if the result is not a smi.
+ __ bind(&skip);
+
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD: {
+ // It could be that only SMIs have been seen at either the left
+ // or the right operand. For precise type feedback, patch the IC
+ // again if this changes.
+ if (left_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(left, &transition);
+ }
+ if (right_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(right, &transition);
+ }
+ // Load both operands and check that they are 32-bit integers.
+ // Jump to type transition if they are not. The registers r0 and r1 (right
+ // and left) are preserved for the runtime call.
+ __ LoadNumberAsInt32Double(
+ right, d1, heap_number_map, scratch1, d8, &transition);
+ __ LoadNumberAsInt32Double(
+ left, d0, heap_number_map, scratch1, d8, &transition);
+
+ if (op_ != Token::MOD) {
+ Label return_heap_number;
+ switch (op_) {
+ case Token::ADD:
+ __ vadd(d5, d0, d1);
+ break;
+ case Token::SUB:
+ __ vsub(d5, d0, d1);
+ break;
+ case Token::MUL:
+ __ vmul(d5, d0, d1);
+ break;
+ case Token::DIV:
+ __ vdiv(d5, d0, d1);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (result_type_ <= BinaryOpIC::INT32) {
+ __ TryDoubleToInt32Exact(scratch1, d5, d8);
+ // If the ne condition is set, the result does
+ // not fit in a 32-bit integer.
+ __ b(ne, &transition);
+ // Try to tag the result as a Smi, return heap number on overflow.
+ __ SmiTag(scratch1, SetCC);
+ __ b(vs, &return_heap_number);
+ // Check for minus zero; transition in that case (because we need
+ // to return a heap number).
+ Label not_zero;
+ ASSERT(kSmiTag == 0);
+ __ b(ne, &not_zero);
+ __ VmovHigh(scratch2, d5);
+ __ tst(scratch2, Operand(HeapNumber::kSignMask));
+ __ b(ne, &transition);
+ __ bind(&not_zero);
+ __ mov(r0, scratch1);
+ __ Ret();
+ }
+
+ __ bind(&return_heap_number);
+ // Return a heap number, or fall through to type transition or runtime
+ // call if we can't.
+ // We are using VFP registers, so r5 is available.
+ heap_number_result = r5;
+ BinaryOpStub_GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime,
+ mode_);
+ __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
+ __ vstr(d5, r0, HeapNumber::kValueOffset);
+ __ mov(r0, heap_number_result);
+ __ Ret();
+
+ // A DIV operation expecting an integer result falls through
+ // to type transition.
+
+ } else {
+ if (encoded_right_arg_.has_value) {
+ __ Vmov(d8, fixed_right_arg_value(), scratch1);
+ __ VFPCompareAndSetFlags(d1, d8);
+ __ b(ne, &transition);
+ }
+
+ // Allocate a heap number to store the result.
+ heap_number_result = r5;
+ BinaryOpStub_GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime,
+ mode_);
+
+ // Call the C function to handle the double operation.
+ CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1);
+ if (FLAG_debug_code) {
+ __ stop("Unreachable code.");
+ }
+
+ __ b(&call_runtime);
+ }
+
+ break;
+ }
+
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ case Token::SAR:
+ case Token::SHR:
+ case Token::SHL: {
+ Label return_heap_number;
+ // Convert operands to 32-bit integers. Right in r2 and left in r3. The
+ // registers r0 and r1 (right and left) are preserved for the runtime
+ // call.
+ __ LoadNumberAsInt32(left, r3, heap_number_map,
+ scratch1, d0, d1, &transition);
+ __ LoadNumberAsInt32(right, r2, heap_number_map,
+ scratch1, d0, d1, &transition);
+
+ // The ECMA-262 standard specifies that, for shift operations, only the
+ // 5 least significant bits of the shift value should be used.
+ switch (op_) {
+ case Token::BIT_OR:
+ __ orr(r2, r3, Operand(r2));
+ break;
+ case Token::BIT_XOR:
+ __ eor(r2, r3, Operand(r2));
+ break;
+ case Token::BIT_AND:
+ __ and_(r2, r3, Operand(r2));
+ break;
+ case Token::SAR:
+ __ and_(r2, r2, Operand(0x1f));
+ __ mov(r2, Operand(r3, ASR, r2));
+ break;
+ case Token::SHR:
+ __ and_(r2, r2, Operand(0x1f));
+ __ mov(r2, Operand(r3, LSR, r2), SetCC);
+ // SHR is special because it is required to produce a positive answer.
+ // We only get a negative result if the shift value (r2) is 0.
+ // This result cannot be represented as a signed 32-bit integer, so try
+ // to return a heap number if we can.
+ __ b(mi, (result_type_ <= BinaryOpIC::INT32)
+ ? &transition
+ : &return_heap_number);
+ break;
+ case Token::SHL:
+ __ and_(r2, r2, Operand(0x1f));
+ __ mov(r2, Operand(r3, LSL, r2));
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ // Check if the result fits in a smi. If not, try to return a heap number.
+ // (We know the result is an int32.)
+ __ TrySmiTag(r0, r2, &return_heap_number);
+ __ Ret();
+
+ __ bind(&return_heap_number);
+ heap_number_result = r5;
+ BinaryOpStub_GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime,
+ mode_);
+
+ if (op_ != Token::SHR) {
+ // Convert the result to a floating point value.
+ __ vmov(double_scratch.low(), r2);
+ __ vcvt_f64_s32(double_scratch, double_scratch.low());
+ } else {
+ // The result must be interpreted as an unsigned 32-bit integer.
+ __ vmov(double_scratch.low(), r2);
+ __ vcvt_f64_u32(double_scratch, double_scratch.low());
+ }
+
+ // Store the result.
+ __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
+ __ vstr(double_scratch, r0, HeapNumber::kValueOffset);
+ __ mov(r0, heap_number_result);
+ __ Ret();
+
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+
+ // We never expect DIV to yield an integer result, so we always generate
+ // type transition code for DIV operations expecting an integer result: the
+ // code will fall through to this type transition.
+ if (transition.is_linked() ||
+ ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
+ __ bind(&transition);
+ GenerateTypeTransition(masm);
+ }
+
+ __ bind(&call_runtime);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
+ }
+ __ Ret();
+}
+
+
+void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
+ Label call_runtime;
+
+ if (op_ == Token::ADD) {
+ // Handle string addition here, because it is the only operation
+ // that does not do a ToNumber conversion on the operands.
+ GenerateAddStrings(masm);
+ }
+
+ // Convert oddball arguments to numbers.
+ Label check, done;
+ __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
+ __ b(ne, &check);
+ if (Token::IsBitOp(op_)) {
+ __ mov(r1, Operand(Smi::FromInt(0)));
+ } else {
+ __ LoadRoot(r1, Heap::kNanValueRootIndex);
+ }
+ __ jmp(&done);
+ __ bind(&check);
+ __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ b(ne, &done);
+ if (Token::IsBitOp(op_)) {
+ __ mov(r0, Operand(Smi::FromInt(0)));
+ } else {
+ __ LoadRoot(r0, Heap::kNanValueRootIndex);
+ }
+ __ bind(&done);
+
+ GenerateNumberStub(masm);
+}
+
+
+void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
+ Label call_runtime, transition;
+ BinaryOpStub_GenerateFPOperation(
+ masm, left_type_, right_type_, false,
+ &transition, &call_runtime, &transition, op_, mode_, r6, r4, r5, r9);
+
+ __ bind(&transition);
+ GenerateTypeTransition(masm);
+
+ __ bind(&call_runtime);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
+ }
+ __ Ret();
+}
+
+
+void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+ Label call_runtime, call_string_add_or_runtime, transition;
+
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_,
+ r5, r6, r4, r9);
+
+ BinaryOpStub_GenerateFPOperation(
+ masm, left_type_, right_type_, false,
+ &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_, r6,
+ r4, r5, r9);
+
+ __ bind(&transition);
+ GenerateTypeTransition(masm);
+
+ __ bind(&call_string_add_or_runtime);
+ if (op_ == Token::ADD) {
+ GenerateAddStrings(masm);
+ }
+
+ __ bind(&call_runtime);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
+ }
+ __ Ret();
+}
+
+
+void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
+ ASSERT(op_ == Token::ADD);
+ Label left_not_string, call_runtime;
+
+ Register left = r1;
+ Register right = r0;
+
+ // Check if left argument is a string.
+ __ JumpIfSmi(left, &left_not_string);
+ __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &left_not_string);
+
+ StringAddStub string_add_left_stub(
+ (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_left_stub);
+
+ // Left operand is not a string, test right.
+ __ bind(&left_not_string);
+ __ JumpIfSmi(right, &call_runtime);
+ __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &call_runtime);
+
+ StringAddStub string_add_right_stub(
+ (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_right_stub);
+
+ // At least one argument is not a string.
+ __ bind(&call_runtime);
+}
+
+
+void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ OverwriteMode mode) {
+ // Code below will scratch result if allocation fails. To keep both
+ // arguments intact for the runtime call, result cannot be one of them.
+ ASSERT(!result.is(r0) && !result.is(r1));
+
+ if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
+ Label skip_allocation, allocated;
+ Register overwritable_operand = mode == OVERWRITE_LEFT ? r1 : r0;
+ // If the overwritable operand is already an object, we skip the
+ // allocation of a heap number.
+ __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
+ // Allocate a heap number for the result.
+ __ AllocateHeapNumber(
+ result, scratch1, scratch2, heap_number_map, gc_required);
+ __ b(&allocated);
+ __ bind(&skip_allocation);
+ // Use the object holding the overwritable operand for the result.
+ __ mov(result, Operand(overwritable_operand));
+ __ bind(&allocated);
+ } else {
+ ASSERT(mode == NO_OVERWRITE);
+ __ AllocateHeapNumber(
+ result, scratch1, scratch2, heap_number_map, gc_required);
+ }
+}
+
+
+void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+ __ Push(r1, r0);
+}
+
+
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Untagged case: double input in d2, double result goes
// into d2.
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
- BinaryOpStub::GenerateAheadOfTime(isolate);
}
virtual KeyedAccessStoreMode GetStoreMode() V8_OVERRIDE {
return store_mode_;
}
- Handle<Type> type() const { return type_; }
+ TypeInfo type() const { return type_; }
BailoutId AssignmentId() const { return assignment_id_; }
bool is_monomorphic_ : 1;
KeyedAccessStoreMode store_mode_ : 5; // Windows treats as signed,
// must have extra bit.
- Handle<Type> type_;
+ TypeInfo type_;
Expression* expression_;
int pos_;
}
-template <>
-HValue* CodeStubGraphBuilder<BinaryOpStub>::BuildCodeInitializedStub() {
- BinaryOpStub* stub = casted_stub();
- HValue* left = GetParameter(0);
- HValue* right = GetParameter(1);
-
- Handle<Type> left_type = stub->GetLeftType(isolate());
- Handle<Type> right_type = stub->GetRightType(isolate());
- Handle<Type> result_type = stub->GetResultType(isolate());
-
- ASSERT(!left_type->Is(Type::None()) && !right_type->Is(Type::None()) &&
- (stub->HasSideEffects(isolate()) || !result_type->Is(Type::None())));
-
- HValue* result = NULL;
- if (stub->operation() == Token::ADD &&
- (left_type->Maybe(Type::String()) || right_type->Maybe(Type::String())) &&
- !left_type->Is(Type::String()) && !right_type->Is(Type::String())) {
- // For the generic add stub a fast case for String add is performance
- // critical.
- if (left_type->Maybe(Type::String())) {
- IfBuilder left_string(this);
- left_string.IfNot<HIsSmiAndBranch>(left);
- left_string.AndIf<HIsStringAndBranch>(left);
- left_string.Then();
- Push(Add<HStringAdd>(left, right, STRING_ADD_CHECK_RIGHT));
- left_string.Else();
- Push(AddInstruction(BuildBinaryOperation(stub->operation(),
- left, right, left_type, right_type, result_type,
- stub->fixed_right_arg(), true)));
- left_string.End();
- result = Pop();
- } else {
- IfBuilder right_string(this);
- right_string.IfNot<HIsSmiAndBranch>(right);
- right_string.AndIf<HIsStringAndBranch>(right);
- right_string.Then();
- Push(Add<HStringAdd>(left, right, STRING_ADD_CHECK_LEFT));
- right_string.Else();
- Push(AddInstruction(BuildBinaryOperation(stub->operation(),
- left, right, left_type, right_type, result_type,
- stub->fixed_right_arg(), true)));
- right_string.End();
- result = Pop();
- }
- } else {
- result = AddInstruction(BuildBinaryOperation(stub->operation(),
- left, right, left_type, right_type, result_type,
- stub->fixed_right_arg(), true));
- }
-
- // If we encounter a generic argument, the number conversion is
- // observable, thus we cannot afford to bail out after the fact.
- if (!stub->HasSideEffects(isolate())) {
- if (result_type->Is(Type::Smi())) {
- if (stub->operation() == Token::SHR) {
- // TODO(olivf) Replace this by a SmiTagU Instruction.
- // 0x40000000: this number would convert to negative when interpreting
- // the register as signed value;
- IfBuilder if_of(this);
- if_of.IfNot<HCompareNumericAndBranch>(result,
- Add<HConstant>(static_cast<int>(0x40000000)), Token::EQ_STRICT);
- if_of.Then();
- if_of.ElseDeopt("UInt->Smi oveflow");
- if_of.End();
- }
- }
- result = EnforceNumberType(result, result_type);
- }
-
- // Reuse the double box if we are allowed to (i.e. chained binops).
- if (stub->CanReuseDoubleBox()) {
- HValue* reuse = (stub->mode() == OVERWRITE_LEFT) ? left : right;
- IfBuilder if_heap_number(this);
- if_heap_number.IfNot<HIsSmiAndBranch>(reuse);
- if_heap_number.Then();
- HValue* res_val = Add<HForceRepresentation>(result,
- Representation::Double());
- HObjectAccess access = HObjectAccess::ForHeapNumberValue();
- Add<HStoreNamedField>(reuse, access, res_val);
- Push(reuse);
- if_heap_number.Else();
- Push(result);
- if_heap_number.End();
- result = Pop();
- }
-
- return result;
-}
-
-
-Handle<Code> BinaryOpStub::GenerateCode(Isolate* isolate) {
- return DoGenerateCode(isolate, this);
-}
-
-
template <>
HValue* CodeStubGraphBuilder<ToBooleanStub>::BuildCodeInitializedStub() {
ToBooleanStub* stub = casted_stub();
? FindCodeInSpecialCache(&code, isolate)
: FindCodeInCache(&code, isolate)) {
ASSERT(IsPregenerated(isolate) == code->is_pregenerated());
- ASSERT(GetCodeKind() == code->kind());
return Handle<Code>(code);
}
}
-void BinaryOpStub::PrintBaseName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* ovr = "";
- if (mode_ == OVERWRITE_LEFT) ovr = "_ReuseLeft";
- if (mode_ == OVERWRITE_RIGHT) ovr = "_ReuseRight";
- stream->Add("BinaryOpStub_%s%s", op_name, ovr);
-}
-
-
-void BinaryOpStub::PrintState(StringStream* stream) {
- stream->Add("(");
- stream->Add(StateToName(left_state_));
- if (left_bool_) {
- stream->Add(",Boolean");
- }
- stream->Add("*");
- if (fixed_right_arg_.has_value) {
- stream->Add("%d", fixed_right_arg_.value);
- } else {
- stream->Add(StateToName(right_state_));
- if (right_bool_) {
- stream->Add(",Boolean");
- }
- }
- stream->Add("->");
- stream->Add(StateToName(result_state_));
- stream->Add(")");
-}
-
-
-Maybe<Handle<Object> > BinaryOpStub::Result(Handle<Object> left,
- Handle<Object> right,
- Isolate* isolate) {
- Handle<JSBuiltinsObject> builtins(isolate->js_builtins_object());
- Builtins::JavaScript func = BinaryOpIC::TokenToJSBuiltin(op_);
- Object* builtin = builtins->javascript_builtin(func);
- Handle<JSFunction> builtin_function =
- Handle<JSFunction>(JSFunction::cast(builtin), isolate);
- bool caught_exception;
- Handle<Object> result = Execution::Call(isolate, builtin_function, left,
- 1, &right, &caught_exception);
- return Maybe<Handle<Object> >(!caught_exception, result);
-}
-
-
-void BinaryOpStub::Initialize() {
- fixed_right_arg_.has_value = false;
- left_state_ = right_state_ = result_state_ = NONE;
- left_bool_ = right_bool_ = false;
-}
-
-
-void BinaryOpStub::Generate(Token::Value op,
- State left,
- State right,
- State result,
- Isolate* isolate) {
- BinaryOpStub stub(INITIALIZED);
- stub.op_ = op;
- stub.left_state_ = left;
- stub.right_state_ = right;
- stub.result_state_ = result;
- stub.mode_ = NO_OVERWRITE;
- stub.GetCode(isolate);
- stub.mode_ = OVERWRITE_LEFT;
- stub.GetCode(isolate);
-}
-
-
-void BinaryOpStub::GenerateAheadOfTime(Isolate* isolate) {
- Token::Value binop[] = {Token::SUB, Token::MOD, Token::DIV, Token::MUL,
- Token::ADD, Token::SAR, Token::BIT_OR, Token::BIT_AND,
- Token::BIT_XOR, Token::SHL, Token::SHR};
- // TODO(olivf) NumberTagU is not snapshot safe yet so we have to skip SHR
- // since that produces a unsigned int32.
- Token::Value bitop[] = {Token::BIT_OR, Token::BIT_AND, Token::BIT_XOR,
- Token::SAR, Token::SHL /* Token::SHR */};
- Token::Value arithop[] = {Token::ADD, Token::SUB, Token::MOD,
- Token::DIV, Token::MUL};
- for (unsigned i = 0; i < ARRAY_SIZE(binop); i++) {
- BinaryOpStub stub(UNINITIALIZED);
- stub.op_ = binop[i];
- stub.GetCode(isolate);
- }
- for (unsigned i = 0; i < ARRAY_SIZE(arithop); i++) {
- Generate(arithop[i], SMI, SMI, SMI, isolate);
- Generate(arithop[i], SMI, SMI, INT32, isolate);
- Generate(arithop[i], SMI, SMI, NUMBER, isolate);
- Generate(arithop[i], INT32, INT32, INT32, isolate);
- Generate(arithop[i], NUMBER, SMI, SMI, isolate);
- Generate(arithop[i], NUMBER, SMI, NUMBER, isolate);
- Generate(arithop[i], NUMBER, INT32, NUMBER, isolate);
- Generate(arithop[i], NUMBER, NUMBER, NUMBER, isolate);
- }
- Generate(Token::SHR, SMI, SMI, SMI, isolate);
- for (unsigned i = 0; i < ARRAY_SIZE(bitop); i++) {
- Generate(bitop[i], SMI, SMI, SMI, isolate);
- Generate(bitop[i], SMI, INT32, INT32, isolate);
- Generate(bitop[i], INT32, INT32, INT32, isolate);
- Generate(bitop[i], NUMBER, INT32, INT32, isolate);
- Generate(bitop[i], NUMBER, NUMBER, INT32, isolate);
- }
- Generate(Token::ADD, STRING, STRING, STRING, isolate);
-
- BinaryOpStub stub(INITIALIZED);
- stub.op_ = Token::MOD;
- stub.left_state_ = SMI;
- stub.right_state_ = SMI;
- stub.result_state_ = SMI;
- stub.fixed_right_arg_.has_value = true;
- stub.fixed_right_arg_.value = 4;
- stub.mode_ = NO_OVERWRITE;
- stub.GetCode(isolate);
- stub.fixed_right_arg_.value = 8;
- stub.GetCode(isolate);
-}
-
-
-bool BinaryOpStub::can_encode_arg_value(int32_t value) const {
- return op_ == Token::MOD && value > 0 && IsPowerOf2(value) &&
- FixedRightArgValueBits::is_valid(WhichPowerOf2(value));
-}
-
-
-int BinaryOpStub::encode_arg_value(int32_t value) const {
- ASSERT(can_encode_arg_value(value));
- return WhichPowerOf2(value);
-}
-
-
-int32_t BinaryOpStub::decode_arg_value(int value) const {
- return 1 << value;
-}
-
-
-int BinaryOpStub::encode_token(Token::Value op) const {
- ASSERT(op >= FIRST_TOKEN && op <= LAST_TOKEN);
- return op - FIRST_TOKEN;
-}
-
-
-Token::Value BinaryOpStub::decode_token(int op) const {
- int res = op + FIRST_TOKEN;
- ASSERT(res >= FIRST_TOKEN && res <= LAST_TOKEN);
- return static_cast<Token::Value>(res);
-}
-
-
-const char* BinaryOpStub::StateToName(State state) {
- switch (state) {
- case NONE:
- return "None";
- case SMI:
- return "Smi";
- case INT32:
- return "Int32";
- case NUMBER:
- return "Number";
- case STRING:
- return "String";
- case GENERIC:
- return "Generic";
- }
- return "";
-}
-
-
-void BinaryOpStub::UpdateStatus(Handle<Object> left,
- Handle<Object> right,
- Maybe<Handle<Object> > result) {
- int old_state = GetExtraICState();
-
- UpdateStatus(left, &left_state_, &left_bool_);
- UpdateStatus(right, &right_state_, &right_bool_);
-
- int32_t value;
- bool new_has_fixed_right_arg =
- right->ToInt32(&value) && can_encode_arg_value(value) &&
- (left_state_ == SMI || left_state_ == INT32) &&
- (result_state_ == NONE || !fixed_right_arg_.has_value);
+void BinaryOpStub::Generate(MacroAssembler* masm) {
+ // Explicitly allow generation of nested stubs. It is safe here because
+ // generation code does not use any raw pointers.
+ AllowStubCallsScope allow_stub_calls(masm, true);
- fixed_right_arg_ = Maybe<int32_t>(new_has_fixed_right_arg, value);
-
- if (result.has_value) UpdateStatus(result.value, &result_state_, NULL);
-
- State max_result = has_int_result() ? INT32 : NUMBER;
- State max_input = Max(left_state_, right_state_);
-
- // Avoid unnecessary Representation changes.
- if (left_state_ == STRING && right_state_ < STRING) {
- right_state_ = GENERIC;
- } else if (right_state_ == STRING && left_state_ < STRING) {
- left_state_ = GENERIC;
- } else if ((right_state_ == GENERIC && left_state_ != STRING) ||
- (left_state_ == GENERIC && right_state_ != STRING)) {
- left_state_ = right_state_ = GENERIC;
- } else if (max_input <= NUMBER && max_input > result_state_) {
- result_state_ = Min(max_result, max_input);
+ BinaryOpIC::TypeInfo operands_type = Max(left_type_, right_type_);
+ if (left_type_ == BinaryOpIC::ODDBALL && right_type_ == BinaryOpIC::ODDBALL) {
+ // The OddballStub handles a number and an oddball, not two oddballs.
+ operands_type = BinaryOpIC::GENERIC;
}
-
- ASSERT(result_state_ <= max_result || op_ == Token::ADD);
-
- if (old_state == GetExtraICState()) {
- // Since the fpu is to precise, we might bail out on numbers which
- // actually would truncate with 64 bit precision.
- ASSERT(!CpuFeatures::IsSupported(SSE2) &&
- result_state_ <= INT32);
- result_state_ = NUMBER;
+ switch (operands_type) {
+ case BinaryOpIC::UNINITIALIZED:
+ GenerateTypeTransition(masm);
+ break;
+ case BinaryOpIC::SMI:
+ GenerateSmiStub(masm);
+ break;
+ case BinaryOpIC::INT32:
+ GenerateInt32Stub(masm);
+ break;
+ case BinaryOpIC::NUMBER:
+ GenerateNumberStub(masm);
+ break;
+ case BinaryOpIC::ODDBALL:
+ GenerateOddballStub(masm);
+ break;
+ case BinaryOpIC::STRING:
+ GenerateStringStub(masm);
+ break;
+ case BinaryOpIC::GENERIC:
+ GenerateGeneric(masm);
+ break;
+ default:
+ UNREACHABLE();
}
}
-void BinaryOpStub::UpdateStatus(Handle<Object> object,
- State* state,
- bool* bool_state) {
- if (object->IsBoolean() && bool_state != NULL) {
- *bool_state = true;
- return;
- }
- v8::internal::TypeInfo type = v8::internal::TypeInfo::FromValue(object);
- if (object->IsUndefined()) {
- // Undefined will be automatically truncated for us by HChange.
- type = (op_ == Token::BIT_AND || op_ == Token::BIT_OR ||
- op_ == Token::BIT_XOR || op_ == Token::SAR ||
- op_ == Token::SHL || op_ == Token::SHR)
- ? TypeInfo::Integer32()
- : TypeInfo::Double();
- }
- State int_state = SmiValuesAre32Bits() ? NUMBER : INT32;
- State new_state = NONE;
- if (type.IsSmi()) {
- new_state = SMI;
- } else if (type.IsInteger32()) {
- new_state = int_state;
- } else if (type.IsNumber()) {
- new_state = NUMBER;
- } else if (object->IsString() && operation() == Token::ADD) {
- new_state = STRING;
- } else {
- new_state = GENERIC;
- }
- if ((new_state <= NUMBER && *state > NUMBER) ||
- (new_state > NUMBER && *state <= NUMBER && *state != NONE)) {
- new_state = GENERIC;
- }
- *state = Max(*state, new_state);
-}
+#define __ ACCESS_MASM(masm)
-Handle<Type> BinaryOpStub::StateToType(State state,
- bool seen_bool,
- Isolate* isolate) {
- Handle<Type> t = handle(Type::None(), isolate);
- switch (state) {
- case NUMBER:
- t = handle(Type::Union(t, handle(Type::Double(), isolate)), isolate);
- // Fall through.
- case INT32:
- t = handle(Type::Union(t, handle(Type::Signed32(), isolate)), isolate);
- // Fall through.
- case SMI:
- t = handle(Type::Union(t, handle(Type::Smi(), isolate)), isolate);
+void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
+ switch (op_) {
+ case Token::ADD:
+ __ InvokeBuiltin(Builtins::ADD, CALL_FUNCTION);
break;
-
- case STRING:
- t = handle(Type::Union(t, handle(Type::String(), isolate)), isolate);
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::SUB, CALL_FUNCTION);
break;
- case GENERIC:
- return handle(Type::Any(), isolate);
+ case Token::MUL:
+ __ InvokeBuiltin(Builtins::MUL, CALL_FUNCTION);
break;
- case NONE:
+ case Token::DIV:
+ __ InvokeBuiltin(Builtins::DIV, CALL_FUNCTION);
break;
+ case Token::MOD:
+ __ InvokeBuiltin(Builtins::MOD, CALL_FUNCTION);
+ break;
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, CALL_FUNCTION);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, CALL_FUNCTION);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, CALL_FUNCTION);
+ break;
+ case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, CALL_FUNCTION);
+ break;
+ case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, CALL_FUNCTION);
+ break;
+ case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, CALL_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
}
- if (seen_bool) {
- t = handle(Type::Union(t, handle(Type::Boolean(), isolate)), isolate);
- }
- return t;
-}
-
-
-Handle<Type> BinaryOpStub::GetLeftType(Isolate* isolate) const {
- return StateToType(left_state_, left_bool_, isolate);
}
-Handle<Type> BinaryOpStub::GetRightType(Isolate* isolate) const {
- return StateToType(right_state_, right_bool_, isolate);
-}
+#undef __
-Handle<Type> BinaryOpStub::GetResultType(Isolate* isolate) const {
- if (HasSideEffects(isolate)) return StateToType(NONE, false, isolate);
- if (result_state_ == GENERIC && op_ == Token::ADD) {
- return handle(Type::Union(handle(Type::Number(), isolate),
- handle(Type::String(), isolate)), isolate);
- }
- ASSERT(result_state_ != GENERIC);
- if (result_state_ == NUMBER && op_ == Token::SHR) {
- return handle(Type::Unsigned32(), isolate);
+void BinaryOpStub::PrintName(StringStream* stream) {
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+ case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+ default: overwrite_name = "UnknownOverwrite"; break;
+ }
+ stream->Add("BinaryOpStub_%s_%s_%s+%s",
+ op_name,
+ overwrite_name,
+ BinaryOpIC::GetName(left_type_),
+ BinaryOpIC::GetName(right_type_));
+}
+
+
+void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+ ASSERT(left_type_ == BinaryOpIC::STRING || right_type_ == BinaryOpIC::STRING);
+ ASSERT(op_ == Token::ADD);
+ if (left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING) {
+ GenerateBothStringStub(masm);
+ return;
}
- return StateToType(result_state_, false, isolate);
+ // Try to add the arguments as strings; otherwise, transition to the
+ // generic BinaryOpIC type.
+ GenerateAddStrings(masm);
+ GenerateTypeTransition(masm);
}
virtual void PrintName(StringStream* stream);
- // Returns a name for logging/debugging purposes.
- SmartArrayPointer<const char> GetName();
-
protected:
static bool CanUseFPRegisters();
// a fixed (non-moveable) code object.
virtual bool NeedsImmovableCode() { return false; }
+ // Returns a name for logging/debugging purposes.
+ SmartArrayPointer<const char> GetName();
virtual void PrintBaseName(StringStream* stream);
virtual void PrintState(StringStream* stream) { }
};
-class BinaryOpStub: public HydrogenCodeStub {
+class BinaryOpStub: public PlatformCodeStub {
public:
BinaryOpStub(Token::Value op, OverwriteMode mode)
- : HydrogenCodeStub(UNINITIALIZED), op_(op), mode_(mode) {
- ASSERT(op <= LAST_TOKEN && op >= FIRST_TOKEN);
+ : op_(op),
+ mode_(mode),
+ platform_specific_bit_(false),
+ left_type_(BinaryOpIC::UNINITIALIZED),
+ right_type_(BinaryOpIC::UNINITIALIZED),
+ result_type_(BinaryOpIC::UNINITIALIZED),
+ encoded_right_arg_(false, encode_arg_value(1)) {
Initialize();
+ ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
- explicit BinaryOpStub(Code::ExtraICState state)
- : op_(decode_token(OpBits::decode(state))),
- mode_(OverwriteModeField::decode(state)),
- fixed_right_arg_(
- Maybe<int>(HasFixedRightArgBits::decode(state),
- decode_arg_value(FixedRightArgValueBits::decode(state)))),
- left_state_(LeftStateField::decode(state)),
- left_bool_(LeftBoolField::decode(state)),
- right_state_(fixed_right_arg_.has_value
- ? ((fixed_right_arg_.value <= Smi::kMaxValue) ? SMI : INT32)
- : RightStateField::decode(state)),
- right_bool_(fixed_right_arg_.has_value
- ? false : RightBoolField::decode(state)),
- result_state_(ResultStateField::decode(state)) {
- // We don't deserialize the SSE2 Field, since this is only used to be able
- // to include SSE2 as well as non-SSE2 versions in the snapshot. For code
- // generation we always want it to reflect the current state.
- ASSERT(!fixed_right_arg_.has_value ||
- can_encode_arg_value(fixed_right_arg_.value));
- }
-
- static const int FIRST_TOKEN = Token::BIT_OR;
- static const int LAST_TOKEN = Token::MOD;
+ BinaryOpStub(
+ int key,
+ BinaryOpIC::TypeInfo left_type,
+ BinaryOpIC::TypeInfo right_type,
+ BinaryOpIC::TypeInfo result_type,
+ Maybe<int32_t> fixed_right_arg)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ platform_specific_bit_(PlatformSpecificBits::decode(key)),
+ left_type_(left_type),
+ right_type_(right_type),
+ result_type_(result_type),
+ encoded_right_arg_(fixed_right_arg.has_value,
+ encode_arg_value(fixed_right_arg.value)) { }
- static void GenerateAheadOfTime(Isolate* isolate);
- virtual void InitializeInterfaceDescriptor(
- Isolate* isolate, CodeStubInterfaceDescriptor* descriptor);
- static void InitializeForIsolate(Isolate* isolate) {
- BinaryOpStub binopStub(UNINITIALIZED);
- binopStub.InitializeInterfaceDescriptor(
- isolate, isolate->code_stub_interface_descriptor(CodeStub::BinaryOp));
+ static void decode_types_from_minor_key(int minor_key,
+ BinaryOpIC::TypeInfo* left_type,
+ BinaryOpIC::TypeInfo* right_type,
+ BinaryOpIC::TypeInfo* result_type) {
+ *left_type =
+ static_cast<BinaryOpIC::TypeInfo>(LeftTypeBits::decode(minor_key));
+ *right_type =
+ static_cast<BinaryOpIC::TypeInfo>(RightTypeBits::decode(minor_key));
+ *result_type =
+ static_cast<BinaryOpIC::TypeInfo>(ResultTypeBits::decode(minor_key));
}
- virtual Code::Kind GetCodeKind() const { return Code::BINARY_OP_IC; }
- virtual InlineCacheState GetICState() {
- if (Max(left_state_, right_state_) == NONE && !left_bool_ && !right_bool_) {
- return ::v8::internal::UNINITIALIZED;
- }
- if (Max(left_state_, right_state_) == GENERIC) return MEGAMORPHIC;
- return MONOMORPHIC;
+ static Token::Value decode_op_from_minor_key(int minor_key) {
+ return static_cast<Token::Value>(OpBits::decode(minor_key));
}
- virtual Code::ExtraICState GetExtraICState() {
- bool sse_field = Max(result_state_, Max(left_state_, right_state_)) > SMI &&
- CpuFeatures::IsSafeForSnapshot(SSE2);
-
- return OpBits::encode(encode_token(op_))
- | LeftStateField::encode(left_state_)
- | LeftBoolField::encode(left_bool_)
- | RightStateField::encode(fixed_right_arg_.has_value
- ? NONE : right_state_)
- | RightBoolField::encode(fixed_right_arg_.has_value
- ? false
- : right_bool_)
- | ResultStateField::encode(result_state_)
- | HasFixedRightArgBits::encode(fixed_right_arg_.has_value)
- | FixedRightArgValueBits::encode(fixed_right_arg_.has_value
- ? encode_arg_value(
- fixed_right_arg_.value)
- : 0)
- | SSE2Field::encode(sse_field)
- | OverwriteModeField::encode(mode_);
- }
-
- bool CanReuseDoubleBox() {
- return result_state_ <= NUMBER && result_state_ > SMI &&
- ((left_state_ > SMI && left_state_ <= NUMBER &&
- mode_ == OVERWRITE_LEFT) ||
- (right_state_ > SMI && right_state_ <= NUMBER &&
- mode_ == OVERWRITE_RIGHT));
- }
-
- bool HasSideEffects(Isolate* isolate) const {
- return GetLeftType(isolate)->Maybe(Type::Receiver()) ||
- GetRightType(isolate)->Maybe(Type::Receiver());
+ static Maybe<int> decode_fixed_right_arg_from_minor_key(int minor_key) {
+ return Maybe<int>(
+ HasFixedRightArgBits::decode(minor_key),
+ decode_arg_value(FixedRightArgValueBits::decode(minor_key)));
}
- virtual Handle<Code> GenerateCode(Isolate* isolate);
+ int fixed_right_arg_value() const {
+ return decode_arg_value(encoded_right_arg_.value);
+ }
- Maybe<Handle<Object> > Result(Handle<Object> left,
- Handle<Object> right,
- Isolate* isolate);
+ static bool can_encode_arg_value(int32_t value) {
+ return value > 0 &&
+ IsPowerOf2(value) &&
+ FixedRightArgValueBits::is_valid(WhichPowerOf2(value));
+ }
- Token::Value operation() const { return op_; }
- OverwriteMode mode() const { return mode_; }
- Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
+ enum SmiCodeGenerateHeapNumberResults {
+ ALLOW_HEAPNUMBER_RESULTS,
+ NO_HEAPNUMBER_RESULTS
+ };
- Handle<Type> GetLeftType(Isolate* isolate) const;
- Handle<Type> GetRightType(Isolate* isolate) const;
- Handle<Type> GetResultType(Isolate* isolate) const;
+ private:
+ Token::Value op_;
+ OverwriteMode mode_;
+ bool platform_specific_bit_; // Indicates SSE3 on IA32.
- void UpdateStatus(Handle<Object> left,
- Handle<Object> right,
- Maybe<Handle<Object> > result);
+ // Operand type information determined at runtime.
+ BinaryOpIC::TypeInfo left_type_;
+ BinaryOpIC::TypeInfo right_type_;
+ BinaryOpIC::TypeInfo result_type_;
- void PrintState(StringStream* stream);
+ Maybe<int> encoded_right_arg_;
- private:
- explicit BinaryOpStub(InitializationState state) : HydrogenCodeStub(state),
- op_(Token::ADD),
- mode_(NO_OVERWRITE) {
- Initialize();
+ static int encode_arg_value(int32_t value) {
+ ASSERT(can_encode_arg_value(value));
+ return WhichPowerOf2(value);
}
- void Initialize();
- enum State { NONE, SMI, INT32, NUMBER, STRING, GENERIC };
-
- // We truncate the last bit of the token.
- STATIC_ASSERT(LAST_TOKEN - FIRST_TOKEN < (1 << 5));
- class LeftStateField: public BitField<State, 0, 3> {};
- class LeftBoolField: public BitField<bool, 3, 1> {};
- // When fixed right arg is set, we don't need to store the right state.
- // Thus the two fields can overlap.
- class HasFixedRightArgBits: public BitField<bool, 4, 1> {};
- class FixedRightArgValueBits: public BitField<int, 5, 4> {};
- class RightStateField: public BitField<State, 5, 3> {};
- class RightBoolField: public BitField<bool, 8, 1> {};
- class ResultStateField: public BitField<State, 9, 3> {};
- class SSE2Field: public BitField<bool, 12, 1> {};
- class OverwriteModeField: public BitField<OverwriteMode, 13, 2> {};
- class OpBits: public BitField<int, 15, 5> {};
-
- virtual CodeStub::Major MajorKey() { return BinaryOp; }
- virtual int NotMissMinorKey() { return GetExtraICState(); }
+ static int32_t decode_arg_value(int value) {
+ return 1 << value;
+ }
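+ // For example, a fixed right argument of 8 is encoded as
+ // WhichPowerOf2(8) == 3 and decoded as 1 << 3.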
- static Handle<Type> StateToType(State state,
- bool seen_bool,
- Isolate* isolate);
+ virtual void PrintName(StringStream* stream);
- static void Generate(Token::Value op,
- State left,
- State right,
- State result,
- Isolate* isolate);
+ // Minor key encoding in all 25 bits FFFFFHTTTRRRLLLPOOOOOOOMM.
+ // Note: We actually do not need 7 bits for the operation, just 4 bits to
+ // encode ADD, SUB, MUL, DIV, MOD, BIT_OR, BIT_AND, BIT_XOR, SAR, SHL, SHR.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 7> {};
+ class PlatformSpecificBits: public BitField<bool, 9, 1> {};
+ class LeftTypeBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
+ class RightTypeBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
+ class ResultTypeBits: public BitField<BinaryOpIC::TypeInfo, 16, 3> {};
+ class HasFixedRightArgBits: public BitField<bool, 19, 1> {};
+ class FixedRightArgValueBits: public BitField<int, 20, 5> {};
+
+ Major MajorKey() { return BinaryOp; }
+ int MinorKey() {
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | PlatformSpecificBits::encode(platform_specific_bit_)
+ | LeftTypeBits::encode(left_type_)
+ | RightTypeBits::encode(right_type_)
+ | ResultTypeBits::encode(result_type_)
+ | HasFixedRightArgBits::encode(encoded_right_arg_.has_value)
+ | FixedRightArgValueBits::encode(encoded_right_arg_.value);
+ }
- void UpdateStatus(Handle<Object> object,
- State* state,
- bool* bool_state);
- bool can_encode_arg_value(int32_t value) const;
- int encode_arg_value(int32_t value) const;
- int32_t decode_arg_value(int value) const;
- int encode_token(Token::Value op) const;
- Token::Value decode_token(int op) const;
+ // Platform-independent implementation.
+ void Generate(MacroAssembler* masm);
+ void GenerateCallRuntime(MacroAssembler* masm);
- bool has_int_result() const {
- return op_ == Token::BIT_XOR || op_ == Token::BIT_AND ||
- op_ == Token::BIT_OR || op_ == Token::SAR;
- }
+ // Platform-independent signature, platform-specific implementation.
+ void Initialize();
+ void GenerateAddStrings(MacroAssembler* masm);
+ void GenerateBothStringStub(MacroAssembler* masm);
+ void GenerateGeneric(MacroAssembler* masm);
+ void GenerateGenericStub(MacroAssembler* masm);
+ void GenerateNumberStub(MacroAssembler* masm);
+ void GenerateInt32Stub(MacroAssembler* masm);
+ void GenerateLoadArguments(MacroAssembler* masm);
+ void GenerateOddballStub(MacroAssembler* masm);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateReturn(MacroAssembler* masm);
+ void GenerateSmiStub(MacroAssembler* masm);
+ void GenerateStringStub(MacroAssembler* masm);
+ void GenerateTypeTransition(MacroAssembler* masm);
+ void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
+ void GenerateUninitializedStub(MacroAssembler* masm);
+
+ // Entirely platform-specific methods are defined as static helper
+ // functions in the <arch>/code-stubs-<arch>.cc files.
- const char* StateToName(State state);
+ virtual Code::Kind GetCodeKind() const { return Code::BINARY_OP_IC; }
- void PrintBaseName(StringStream* stream);
+ virtual InlineCacheState GetICState() {
+ return BinaryOpIC::ToState(Max(left_type_, right_type_));
+ }
- Token::Value op_;
- OverwriteMode mode_;
+ virtual void FinishCode(Handle<Code> code) {
+ code->set_stub_info(MinorKey());
+ }
- Maybe<int> fixed_right_arg_;
- State left_state_;
- bool left_bool_;
- State right_state_;
- bool right_bool_;
- State result_state_;
+ friend class CodeGenerator;
};
DestinationRegisterBits::encode(destination.code_) |
OffsetBits::encode(offset) |
IsTruncatingBits::encode(is_truncating) |
- SkipFastPathBits::encode(skip_fastpath) |
- SSEBits::encode(CpuFeatures::IsSafeForSnapshot(SSE2) ?
- CpuFeatures::IsSafeForSnapshot(SSE3) ? 2 : 1 : 0);
+ SkipFastPathBits::encode(skip_fastpath);
}
Register source() {
public BitField<int, 2 * kBitsPerRegisterNumber + 1, 3> {}; // NOLINT
class SkipFastPathBits:
public BitField<int, 2 * kBitsPerRegisterNumber + 4, 1> {}; // NOLINT
- class SSEBits:
- public BitField<int, 2 * kBitsPerRegisterNumber + 5, 2> {}; // NOLINT
Major MajorKey() { return DoubleToI; }
int MinorKey() { return bit_field_; }
void SetFlag(Flag f) { flags_ |= (1 << f); }
void ClearFlag(Flag f) { flags_ &= ~(1 << f); }
bool CheckFlag(Flag f) const { return (flags_ & (1 << f)) != 0; }
- void CopyFlag(Flag f, HValue* other) {
- if (other->CheckFlag(f)) SetFlag(f);
- }
// Returns true if the flag specified is set for all uses, false otherwise.
bool CheckUsesForFlag(Flag f) const;
bool returns_original_input,
CountOperation* expr) {
// The input to the count operation is on top of the expression stack.
- Handle<Type> info = expr->type();
+ TypeInfo info = expr->type();
Representation rep = Representation::FromType(info);
if (rep.IsNone() || rep.IsTagged()) {
rep = Representation::Smi();
Handle<Type> left_type,
Handle<Type> right_type,
Handle<Type> result_type,
- Maybe<int> fixed_right_arg,
- bool binop_stub) {
+ Maybe<int> fixed_right_arg) {
Representation left_rep = Representation::FromType(left_type);
Representation right_rep = Representation::FromType(right_type);
right_rep = Representation::FromType(right_type);
}
- if (binop_stub) {
- left = EnforceNumberType(left, left_type);
- right = EnforceNumberType(right, right_type);
- }
-
Representation result_rep = Representation::FromType(result_type);
- bool is_non_primitive = (left_rep.IsTagged() && !left_rep.IsSmi()) ||
- (right_rep.IsTagged() && !right_rep.IsSmi());
- bool is_string_add = op == Token::ADD &&
- (left_type->Is(Type::String()) ||
- right_type->Is(Type::String()));
+ bool is_string_add = op == Token::ADD &&
+ (left_type->Is(Type::String()) ||
+ right_type->Is(Type::String()));
HInstruction* instr = NULL;
- // Only the stub is allowed to call into the runtime, since otherwise we would
- // inline several instructions (including the two pushes) for every tagged
- // operation in optimized code, which is more expensive, than a stub call.
- if (binop_stub && is_non_primitive && !is_string_add) {
- HValue* function = AddLoadJSBuiltin(BinaryOpIC::TokenToJSBuiltin(op));
- Add<HPushArgument>(left);
- Add<HPushArgument>(right);
- instr = NewUncasted<HInvokeFunction>(function, 2);
- } else {
- switch (op) {
- case Token::ADD:
- if (is_string_add) {
- StringAddFlags flags = STRING_ADD_CHECK_BOTH;
- if (left_type->Is(Type::String())) {
- BuildCheckHeapObject(left);
- AddInstruction(HCheckInstanceType::NewIsString(left, zone()));
- flags = STRING_ADD_CHECK_RIGHT;
- }
- if (right_type->Is(Type::String())) {
- BuildCheckHeapObject(right);
- AddInstruction(HCheckInstanceType::NewIsString(right, zone()));
- flags = (flags == STRING_ADD_CHECK_BOTH)
- ? STRING_ADD_CHECK_LEFT : STRING_ADD_CHECK_NONE;
- }
- instr = NewUncasted<HStringAdd>(left, right, flags);
- } else {
- instr = NewUncasted<HAdd>(left, right);
+ switch (op) {
+ case Token::ADD:
+ if (is_string_add) {
+ StringAddFlags flags = STRING_ADD_CHECK_BOTH;
+ if (left_type->Is(Type::String())) {
+ BuildCheckHeapObject(left);
+ AddInstruction(HCheckInstanceType::NewIsString(left, zone()));
+ flags = STRING_ADD_CHECK_RIGHT;
}
- break;
- case Token::SUB:
- instr = NewUncasted<HSub>(left, right);
- break;
- case Token::MUL:
- instr = NewUncasted<HMul>(left, right);
- break;
- case Token::MOD:
- instr = NewUncasted<HMod>(left, right, fixed_right_arg);
- break;
- case Token::DIV:
- instr = NewUncasted<HDiv>(left, right);
- break;
- case Token::BIT_XOR:
- case Token::BIT_AND:
- instr = NewUncasted<HBitwise>(op, left, right);
- break;
- case Token::BIT_OR: {
- HValue* operand, *shift_amount;
- if (left_type->Is(Type::Signed32()) &&
- right_type->Is(Type::Signed32()) &&
- MatchRotateRight(left, right, &operand, &shift_amount)) {
- instr = NewUncasted<HRor>(operand, shift_amount);
- } else {
- instr = NewUncasted<HBitwise>(op, left, right);
+ if (right_type->Is(Type::String())) {
+ BuildCheckHeapObject(right);
+ AddInstruction(HCheckInstanceType::NewIsString(right, zone()));
+ flags = (flags == STRING_ADD_CHECK_BOTH)
+ ? STRING_ADD_CHECK_LEFT : STRING_ADD_CHECK_NONE;
}
- break;
+ instr = NewUncasted<HStringAdd>(left, right, flags);
+ } else {
+ instr = NewUncasted<HAdd>(left, right);
}
- case Token::SAR:
- instr = NewUncasted<HSar>(left, right);
- break;
- case Token::SHR:
- instr = NewUncasted<HShr>(left, right);
- if (FLAG_opt_safe_uint32_operations && instr->IsShr() &&
- CanBeZero(right)) {
- graph()->RecordUint32Instruction(instr);
- }
- break;
- case Token::SHL:
- instr = NewUncasted<HShl>(left, right);
- break;
- default:
- UNREACHABLE();
+ break;
+ case Token::SUB:
+ instr = NewUncasted<HSub>(left, right);
+ break;
+ case Token::MUL:
+ instr = NewUncasted<HMul>(left, right);
+ break;
+ case Token::MOD:
+ instr = NewUncasted<HMod>(left, right, fixed_right_arg);
+ break;
+ case Token::DIV:
+ instr = NewUncasted<HDiv>(left, right);
+ break;
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ instr = NewUncasted<HBitwise>(op, left, right);
+ break;
+ case Token::BIT_OR: {
+ HValue* operand, *shift_amount;
+ if (left_type->Is(Type::Signed32()) &&
+ right_type->Is(Type::Signed32()) &&
+ MatchRotateRight(left, right, &operand, &shift_amount)) {
+ instr = NewUncasted<HRor>(operand, shift_amount);
+ } else {
+ instr = NewUncasted<HBitwise>(op, left, right);
+ }
+ break;
}
+ case Token::SAR:
+ instr = NewUncasted<HSar>(left, right);
+ break;
+ case Token::SHR:
+ instr = NewUncasted<HShr>(left, right);
+ if (FLAG_opt_safe_uint32_operations && instr->IsShr() &&
+ CanBeZero(right)) {
+ graph()->RecordUint32Instruction(instr);
+ }
+ break;
+ case Token::SHL:
+ instr = NewUncasted<HShl>(left, right);
+ break;
+ default:
+ UNREACHABLE();
}
if (instr->IsBinaryOperation()) {
binop->set_observed_input_representation(1, left_rep);
binop->set_observed_input_representation(2, right_rep);
binop->initialize_output_representation(result_rep);
- if (binop_stub) {
- // Stub should not call into stub.
- instr->SetFlag(HValue::kCannotBeTagged);
- // And should truncate on HForceRepresentation already.
- if (left->IsForceRepresentation()) {
- left->CopyFlag(HValue::kTruncatingToSmi, instr);
- left->CopyFlag(HValue::kTruncatingToInt32, instr);
- }
- if (right->IsForceRepresentation()) {
- right->CopyFlag(HValue::kTruncatingToSmi, instr);
- right->CopyFlag(HValue::kTruncatingToInt32, instr);
- }
- }
}
return instr;
}
Handle<Type> left_type,
Handle<Type> right_type,
Handle<Type> result_type,
- Maybe<int> fixed_right_arg,
- bool binop_stub = false);
+ Maybe<int> fixed_right_arg);
HLoadNamedField* AddLoadFixedArrayLength(HValue *object);
}
-void BinaryOpStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { edx, eax };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
-}
-
-
#define __ ACCESS_MASM(masm)
// on FPU stack.
static void LoadFloatOperand(MacroAssembler* masm, Register number);
+ // Code pattern for loading floating point values. Input values must
+ // be either smi or heap number objects (fp values). Requirements:
+ // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
+ // Returns operands as floating point numbers on FPU stack.
+ static void LoadFloatOperands(MacroAssembler* masm,
+ Register scratch,
+ ArgLocation arg_location = ARGS_ON_STACK);
+
+ // Similar to LoadFloatOperand but assumes that both operands are smis.
+ // Expects operands in edx, eax.
+ static void LoadFloatSmis(MacroAssembler* masm, Register scratch);
+
// Test if operands are smi or number objects (fp). Requirements:
// operand_1 in eax, operand_2 in edx; falls through on float
// operands, jumps to the non_float label otherwise.
Label* non_float,
Register scratch);
+ // Takes the operands in edx and eax and loads them as integers in eax
+ // and ecx.
+ static void LoadUnknownsAsIntegers(MacroAssembler* masm,
+ bool use_sse3,
+ BinaryOpIC::TypeInfo left_type,
+ BinaryOpIC::TypeInfo right_type,
+ Label* operand_conversion_failure);
+
// Test if operands are numbers (smi or HeapNumber objects), and load
// them into xmm0 and xmm1 if they are. Jump to label not_numbers if
// either operand is not a number. Operands are in edx and eax.
// Leaves operands unchanged.
static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
+
+ // Similar to LoadSSE2Operands but assumes that both operands are smis.
+ // Expects operands in edx, eax.
+ static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
+
+ // Checks that |operand| has an int32 value. If |int32_result| is different
+ // from |scratch|, it will contain that int32 value.
+ static void CheckSSE2OperandIsInt32(MacroAssembler* masm,
+ Label* non_int32,
+ XMMRegister operand,
+ Register int32_result,
+ Register scratch,
+ XMMRegister xmm_scratch);
};
}
+void BinaryOpStub::Initialize() {
+ platform_specific_bit_ = CpuFeatures::IsSupported(SSE3);
+}
+
+
+void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ __ pop(ecx); // Save return address.
+ __ push(edx);
+ __ push(eax);
+ // Left and right arguments are now on top.
+ __ push(Immediate(Smi::FromInt(MinorKey())));
+
+ __ push(ecx); // Push return address.
+
+ // Patch the caller to an appropriate specialized stub and return the
+ // operation result to the caller of the stub.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
+ masm->isolate()),
+ 3,
+ 1);
+}
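+
+// At the tail call above, the stack is (top to bottom): return address,
+// MinorKey smi, right operand (eax), left operand (edx); these are the
+// three arguments that BinaryOp_Patch reads back as left, right and key.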
+
+
+// Prepare for a type transition runtime call when the args are already on
+// the stack, under the return address.
+void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
+ __ pop(ecx); // Save return address.
+ // Left and right arguments are already on top of the stack.
+ __ push(Immediate(Smi::FromInt(MinorKey())));
+
+ __ push(ecx); // Push return address.
+
+ // Patch the caller to an appropriate specialized stub and return the
+ // operation result to the caller of the stub.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
+ masm->isolate()),
+ 3,
+ 1);
+}
+
+
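+// Undoes GenerateRegisterArgsPush: reloads right into eax and left into
+// edx, leaving only the return address on the stack.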
+static void BinaryOpStub_GenerateRegisterArgsPop(MacroAssembler* masm) {
+ __ pop(ecx);
+ __ pop(eax);
+ __ pop(edx);
+ __ push(ecx);
+}
+
+
+static void BinaryOpStub_GenerateSmiCode(
+ MacroAssembler* masm,
+ Label* slow,
+ BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
+ Token::Value op) {
+ // 1. Move arguments into edx, eax except for DIV and MOD, which need the
+ // dividend in eax and edx free for the division. Use eax, ebx for those.
+ Comment load_comment(masm, "-- Load arguments");
+ Register left = edx;
+ Register right = eax;
+ if (op == Token::DIV || op == Token::MOD) {
+ left = eax;
+ right = ebx;
+ __ mov(ebx, eax);
+ __ mov(eax, edx);
+ }
+
+ // 2. Prepare the smi check of both operands by oring them together.
+ Comment smi_check_comment(masm, "-- Smi check arguments");
+ Label not_smis;
+ Register combined = ecx;
+ ASSERT(!left.is(combined) && !right.is(combined));
+ switch (op) {
+ case Token::BIT_OR:
+ // Perform the operation into eax and smi check the result. Preserve
+ // eax in case the result is not a smi.
+ ASSERT(!left.is(ecx) && !right.is(ecx));
+ __ mov(ecx, right);
+ __ or_(right, left); // Bitwise or is commutative.
+ combined = right;
+ break;
+
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD:
+ __ mov(combined, right);
+ __ or_(combined, left);
+ break;
+
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ // Move the right operand into ecx for the shift operation and use
+ // eax as the smi check register.
+ ASSERT(!left.is(ecx) && !right.is(ecx));
+ __ mov(ecx, right);
+ __ or_(right, left);
+ combined = right;
+ break;
+
+ default:
+ break;
+ }
+
+ // 3. Perform the smi check of the operands.
+ STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
+ __ JumpIfNotSmi(combined, &not_smis);
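+
+ // Why or-ing works (a sketch): with kSmiTag == 0 the low bit of a smi
+ // is clear, so (left | right) has a clear low bit exactly when both
+ // operands are smis; the single JumpIfNotSmi above covers both.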
+
+ // 4. Operands are both smis, perform the operation leaving the result in
+ // eax and check the result if necessary.
+ Comment perform_smi(masm, "-- Perform smi operation");
+ Label use_fp_on_smis;
+ switch (op) {
+ case Token::BIT_OR:
+ // Nothing to do.
+ break;
+
+ case Token::BIT_XOR:
+ ASSERT(right.is(eax));
+ __ xor_(right, left); // Bitwise xor is commutative.
+ break;
+
+ case Token::BIT_AND:
+ ASSERT(right.is(eax));
+ __ and_(right, left); // Bitwise and is commutative.
+ break;
+
+ case Token::SHL:
+ // Remove tags from operands (but keep sign).
+ __ SmiUntag(left);
+ __ SmiUntag(ecx);
+ // Perform the operation.
+ __ shl_cl(left);
+ // Check that the *signed* result fits in a smi.
+ __ cmp(left, 0xc0000000);
+ __ j(sign, &use_fp_on_smis);
+ // Tag the result and store it in register eax.
+ __ SmiTag(left);
+ __ mov(eax, left);
+ break;
+
+ case Token::SAR:
+ // Remove tags from operands (but keep sign).
+ __ SmiUntag(left);
+ __ SmiUntag(ecx);
+ // Perform the operation.
+ __ sar_cl(left);
+ // Tag the result and store it in register eax.
+ __ SmiTag(left);
+ __ mov(eax, left);
+ break;
+
+ case Token::SHR:
+ // Remove tags from operands (but keep sign).
+ __ SmiUntag(left);
+ __ SmiUntag(ecx);
+ // Perform the operation.
+ __ shr_cl(left);
+ // Check that the *unsigned* result fits in a smi.
+ // Neither of the two high-order bits can be set:
+ // - 0x80000000: the high bit would be lost when smi tagging.
+ // - 0x40000000: this number would convert to negative when smi
+ // tagging. These two cases can only happen with shifts by 0 or 1
+ // when handed a valid smi.
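+ // Example: a smi input of -1 shifted by 0 yields 0xffffffff; both high
+ // bits are set, so we take the use_fp_on_smis path.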
+ __ test(left, Immediate(0xc0000000));
+ __ j(not_zero, &use_fp_on_smis);
+ // Tag the result and store it in register eax.
+ __ SmiTag(left);
+ __ mov(eax, left);
+ break;
+
+ case Token::ADD:
+ ASSERT(right.is(eax));
+ __ add(right, left); // Addition is commutative.
+ __ j(overflow, &use_fp_on_smis);
+ break;
+
+ case Token::SUB:
+ __ sub(left, right);
+ __ j(overflow, &use_fp_on_smis);
+ __ mov(eax, left);
+ break;
+
+ case Token::MUL:
+ // If the smi tag is 0 we can just leave the tag on one operand.
+ STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
+ // We can't revert the multiplication if the result is not a smi,
+ // so save the right operand.
+ __ mov(ebx, right);
+ // Remove tag from one of the operands (but keep sign).
+ __ SmiUntag(right);
+ // Do multiplication.
+ __ imul(right, left); // Multiplication is commutative.
+ __ j(overflow, &use_fp_on_smis);
+ // Check for negative zero result. Use combined = left | right.
+ __ NegativeZeroTest(right, combined, &use_fp_on_smis);
+ break;
+
+ case Token::DIV:
+ // We can't revert the division if the result is not a smi, so
+ // save the left operand.
+ __ mov(edi, left);
+ // Check for 0 divisor.
+ __ test(right, right);
+ __ j(zero, &use_fp_on_smis);
+ // Sign extend left into edx:eax.
+ ASSERT(left.is(eax));
+ __ cdq();
+ // Divide edx:eax by right.
+ __ idiv(right);
+ // Check for the corner case of dividing the most negative smi by
+ // -1. We cannot use the overflow flag, since it is not set by the
+ // idiv instruction.
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ __ cmp(eax, 0x40000000);
+ __ j(equal, &use_fp_on_smis);
+ // Check for negative zero result. Use combined = left | right.
+ __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
+ // Check that the remainder is zero.
+ __ test(edx, edx);
+ __ j(not_zero, &use_fp_on_smis);
+ // Tag the result and store it in register eax.
+ __ SmiTag(eax);
+ break;
+
+ case Token::MOD:
+ // Check for 0 divisor.
+ __ test(right, right);
+ __ j(zero, &not_smis);
+
+ // Sign extend left into edx:eax.
+ ASSERT(left.is(eax));
+ __ cdq();
+ // Divide edx:eax by right.
+ __ idiv(right);
+ // Check for negative zero result. Use combined = left | right.
+ __ NegativeZeroTest(edx, combined, slow);
+ // Move remainder to register eax.
+ __ mov(eax, edx);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+
+ // 5. Emit return of result in eax. Some operations have registers pushed.
+ switch (op) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ __ ret(0);
+ break;
+ case Token::MOD:
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR:
+ __ ret(2 * kPointerSize);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ // 6. For some operations emit inline code to perform floating point
+ // operations on known smis (e.g., if the result of the operation
+ // overflowed the smi range).
+ if (allow_heapnumber_results == BinaryOpStub::NO_HEAPNUMBER_RESULTS) {
+ __ bind(&use_fp_on_smis);
+ switch (op) {
+ // Undo the effects of some operations, and some register moves.
+ case Token::SHL:
+ // The arguments are saved on the stack, and only used from there.
+ break;
+ case Token::ADD:
+ // Revert right = right + left.
+ __ sub(right, left);
+ break;
+ case Token::SUB:
+ // Revert left = left - right.
+ __ add(left, right);
+ break;
+ case Token::MUL:
+ // Right was clobbered but a copy is in ebx.
+ __ mov(right, ebx);
+ break;
+ case Token::DIV:
+ // Left was clobbered but a copy is in edi. Right is in ebx for
+ // division. They should be in eax, ebx for the jump to not_smis.
+ __ mov(eax, edi);
+ break;
+ default:
+ // No other operators jump to use_fp_on_smis.
+ break;
+ }
+ __ jmp(&not_smis);
+ } else {
+ ASSERT(allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS);
+ switch (op) {
+ case Token::SHL:
+ case Token::SHR: {
+ Comment perform_float(masm, "-- Perform float operation on smis");
+ __ bind(&use_fp_on_smis);
+ // Result we want is in left == edx, so we can put the allocated heap
+ // number in eax.
+ __ AllocateHeapNumber(eax, ecx, ebx, slow);
+ // Store the result in the HeapNumber and return.
+ // It's OK to overwrite the arguments on the stack because we
+ // are about to return.
+ if (op == Token::SHR) {
+ __ mov(Operand(esp, 1 * kPointerSize), left);
+ __ mov(Operand(esp, 2 * kPointerSize), Immediate(0));
+ __ fild_d(Operand(esp, 1 * kPointerSize));
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ } else {
+ ASSERT_EQ(Token::SHL, op);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope use_sse2(masm, SSE2);
+ __ Cvtsi2sd(xmm0, left);
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ mov(Operand(esp, 1 * kPointerSize), left);
+ __ fild_s(Operand(esp, 1 * kPointerSize));
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ }
+ }
+ __ ret(2 * kPointerSize);
+ break;
+ }
+
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ Comment perform_float(masm, "-- Perform float operation on smis");
+ __ bind(&use_fp_on_smis);
+ // Restore arguments to edx, eax.
+ switch (op) {
+ case Token::ADD:
+ // Revert right = right + left.
+ __ sub(right, left);
+ break;
+ case Token::SUB:
+ // Revert left = left - right.
+ __ add(left, right);
+ break;
+ case Token::MUL:
+ // Right was clobbered but a copy is in ebx.
+ __ mov(right, ebx);
+ break;
+ case Token::DIV:
+ // Left was clobbered but a copy is in edi. Right is in ebx for
+ // division.
+ __ mov(edx, edi);
+ __ mov(eax, right);
+ break;
+ default: UNREACHABLE();
+ break;
+ }
+ __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope use_sse2(masm, SSE2);
+ FloatingPointHelper::LoadSSE2Smis(masm, ebx);
+ switch (op) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
+ } else { // SSE2 not available, use FPU.
+ FloatingPointHelper::LoadFloatSmis(masm, ebx);
+ switch (op) {
+ case Token::ADD: __ faddp(1); break;
+ case Token::SUB: __ fsubp(1); break;
+ case Token::MUL: __ fmulp(1); break;
+ case Token::DIV: __ fdivp(1); break;
+ default: UNREACHABLE();
+ }
+ __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
+ }
+ __ mov(eax, ecx);
+ __ ret(0);
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ // 7. Non-smi operands, fall out to the non-smi code with the operands in
+ // edx and eax.
+ Comment done_comment(masm, "-- Enter non-smi code");
+ __ bind(&not_smis);
+ switch (op) {
+ case Token::BIT_OR:
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ // Right operand is saved in ecx and eax was destroyed by the smi
+ // check.
+ __ mov(eax, ecx);
+ break;
+
+ case Token::DIV:
+ case Token::MOD:
+ // Operands are in eax, ebx at this point.
+ __ mov(edx, eax);
+ __ mov(eax, ebx);
+ break;
+
+ default:
+ break;
+ }
+}
+
+
+void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+ Label right_arg_changed, call_runtime;
+
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ break;
+ case Token::MOD:
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR:
+ GenerateRegisterArgsPush(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (op_ == Token::MOD && encoded_right_arg_.has_value) {
+ // It is guaranteed that the value will fit into a Smi, because if it
+ // didn't, we wouldn't be here; see BinaryOp_Patch.
+ __ cmp(eax, Immediate(Smi::FromInt(fixed_right_arg_value())));
+ __ j(not_equal, &right_arg_changed);
+ }
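+
+ // Example: if the IC has only ever seen x % 8, the value 8 is recorded
+ // as the fixed right argument (the encodable values are assumed to be
+ // small powers of two, per can_encode_arg_value); a different divisor
+ // later fails the guard above and triggers a type transition.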
+
+ if (result_type_ == BinaryOpIC::UNINITIALIZED ||
+ result_type_ == BinaryOpIC::SMI) {
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, NO_HEAPNUMBER_RESULTS, op_);
+ } else {
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
+ }
+
+ // Code falls through if the result is not returned as either a smi or heap
+ // number.
+ __ bind(&right_arg_changed);
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ GenerateTypeTransition(masm);
+ break;
+ case Token::MOD:
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR:
+ GenerateTypeTransitionWithSavedArgs(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ bind(&call_runtime);
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ break;
+ case Token::MOD:
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR:
+ BinaryOpStub_GenerateRegisterArgsPop(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(edx);
+ __ push(eax);
+ GenerateCallRuntime(masm);
+ }
+ __ ret(0);
+}
+
+
+void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
+ Label call_runtime;
+ ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
+ ASSERT(op_ == Token::ADD);
+ // If both arguments are strings, call the string add stub.
+ // Otherwise, do a transition.
+
+ // Registers containing left and right operands respectively.
+ Register left = edx;
+ Register right = eax;
+
+ // Test if left operand is a string.
+ __ JumpIfSmi(left, &call_runtime, Label::kNear);
+ __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
+ __ j(above_equal, &call_runtime, Label::kNear);
+
+ // Test if right operand is a string.
+ __ JumpIfSmi(right, &call_runtime, Label::kNear);
+ __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
+ __ j(above_equal, &call_runtime, Label::kNear);
+
+ StringAddStub string_add_stub(
+ (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_stub);
+
+ __ bind(&call_runtime);
+ GenerateTypeTransition(masm);
+}
+
+
+static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Label* alloc_failure,
+ OverwriteMode mode);
+
+
+// Input:
+// edx: left operand (tagged)
+// eax: right operand (tagged)
+// Output:
+// eax: result (tagged)
+void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+ Label call_runtime;
+ ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
+
+ // Floating point case.
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD: {
+ Label not_floats, not_int32, right_arg_changed;
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope use_sse2(masm, SSE2);
+ // It could be that only SMIs have been seen at either the left
+ // or the right operand. For precise type feedback, patch the IC
+ // again if this changes.
+ // In theory, we would need the same check in the non-SSE2 case,
+ // but since we don't support Crankshaft on such hardware we can
+ // afford not to care about precise type feedback.
+ if (left_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(edx, &not_int32);
+ }
+ if (right_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(eax, &not_int32);
+ }
+ FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
+ FloatingPointHelper::CheckSSE2OperandIsInt32(
+ masm, &not_int32, xmm0, ebx, ecx, xmm2);
+ FloatingPointHelper::CheckSSE2OperandIsInt32(
+ masm, &not_int32, xmm1, edi, ecx, xmm2);
+ if (op_ == Token::MOD) {
+ if (encoded_right_arg_.has_value) {
+ __ cmp(edi, Immediate(fixed_right_arg_value()));
+ __ j(not_equal, &right_arg_changed);
+ }
+ GenerateRegisterArgsPush(masm);
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+ } else {
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ // Check result type if it is currently Int32.
+ if (result_type_ <= BinaryOpIC::INT32) {
+ FloatingPointHelper::CheckSSE2OperandIsInt32(
+ masm, &not_int32, xmm0, ecx, ecx, xmm2);
+ }
+ BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ __ ret(0);
+ }
+ } else { // SSE2 not available, use FPU.
+ FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
+ FloatingPointHelper::LoadFloatOperands(
+ masm,
+ ecx,
+ FloatingPointHelper::ARGS_IN_REGISTERS);
+ if (op_ == Token::MOD) {
+ // The operands are now on the FPU stack, but we don't need them.
+ __ fstp(0);
+ __ fstp(0);
+ GenerateRegisterArgsPush(masm);
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+ } else {
+ switch (op_) {
+ case Token::ADD: __ faddp(1); break;
+ case Token::SUB: __ fsubp(1); break;
+ case Token::MUL: __ fmulp(1); break;
+ case Token::DIV: __ fdivp(1); break;
+ default: UNREACHABLE();
+ }
+ Label after_alloc_failure;
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, &after_alloc_failure, mode_);
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ ret(0);
+ __ bind(&after_alloc_failure);
+ __ fstp(0); // Pop FPU stack before calling runtime.
+ __ jmp(&call_runtime);
+ }
+ }
+
+ __ bind(&not_floats);
+ __ bind(&not_int32);
+ __ bind(&right_arg_changed);
+ GenerateTypeTransition(masm);
+ break;
+ }
+
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR: {
+ GenerateRegisterArgsPush(masm);
+ Label not_floats;
+ Label not_int32;
+ Label non_smi_result;
+ bool use_sse3 = platform_specific_bit_;
+ FloatingPointHelper::LoadUnknownsAsIntegers(
+ masm, use_sse3, left_type_, right_type_, &not_floats);
+ switch (op_) {
+ case Token::BIT_OR: __ or_(eax, ecx); break;
+ case Token::BIT_AND: __ and_(eax, ecx); break;
+ case Token::BIT_XOR: __ xor_(eax, ecx); break;
+ case Token::SAR: __ sar_cl(eax); break;
+ case Token::SHL: __ shl_cl(eax); break;
+ case Token::SHR: __ shr_cl(eax); break;
+ default: UNREACHABLE();
+ }
+ if (op_ == Token::SHR) {
+ // Check if result is non-negative and fits in a smi.
+ __ test(eax, Immediate(0xc0000000));
+ __ j(not_zero, &call_runtime);
+ } else {
+ // Check if result fits in a smi.
+ __ cmp(eax, 0xc0000000);
+ __ j(negative, &non_smi_result, Label::kNear);
+ }
+ // Tag smi result and return.
+ __ SmiTag(eax);
+ __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
+
+ // All ops except SHR return a signed int32 that we load in
+ // a HeapNumber.
+ if (op_ != Token::SHR) {
+ __ bind(&non_smi_result);
+ // Allocate a heap number if needed.
+ __ mov(ebx, eax); // ebx: result
+ Label skip_allocation;
+ switch (mode_) {
+ case OVERWRITE_LEFT:
+ case OVERWRITE_RIGHT:
+ // If the operand was an object, we skip the
+ // allocation of a heap number.
+ __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
+ 1 * kPointerSize : 2 * kPointerSize));
+ __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
+ // Fall through!
+ case NO_OVERWRITE:
+ __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+ // Store the result in the HeapNumber and return.
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope use_sse2(masm, SSE2);
+ __ Cvtsi2sd(xmm0, ebx);
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ mov(Operand(esp, 1 * kPointerSize), ebx);
+ __ fild_s(Operand(esp, 1 * kPointerSize));
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ }
+ __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
+ }
+
+ __ bind(&not_floats);
+ __ bind(&not_int32);
+ GenerateTypeTransitionWithSavedArgs(masm);
+ break;
+ }
+ default: UNREACHABLE(); break;
+ }
+
+ // If an allocation fails, or SHR hits a hard case, use the runtime system to
+ // get the correct result.
+ __ bind(&call_runtime);
+
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ break;
+ case Token::MOD:
+ return; // Handled above.
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR:
+ BinaryOpStub_GenerateRegisterArgsPop(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(edx);
+ __ push(eax);
+ GenerateCallRuntime(masm);
+ }
+ __ ret(0);
+}
+
+
+void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
+ if (op_ == Token::ADD) {
+ // Handle string addition here, because it is the only operation
+ // that does not do a ToNumber conversion on the operands.
+ GenerateAddStrings(masm);
+ }
+
+ Factory* factory = masm->isolate()->factory();
+
+ // Convert odd ball arguments to numbers.
+ Label check, done;
+ __ cmp(edx, factory->undefined_value());
+ __ j(not_equal, &check, Label::kNear);
+ if (Token::IsBitOp(op_)) {
+ __ xor_(edx, edx);
+ } else {
+ __ mov(edx, Immediate(factory->nan_value()));
+ }
+ __ jmp(&done, Label::kNear);
+ __ bind(&check);
+ __ cmp(eax, factory->undefined_value());
+ __ j(not_equal, &done, Label::kNear);
+ if (Token::IsBitOp(op_)) {
+ __ xor_(eax, eax);
+ } else {
+ __ mov(eax, Immediate(factory->nan_value()));
+ }
+ __ bind(&done);
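+
+ // Examples of the conversions above (JS semantics): undefined + 1 is
+ // NaN, while undefined | 1 is 1, because bit ops convert undefined to 0.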
+
+ GenerateNumberStub(masm);
+}
+
+
+void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
+ Label call_runtime;
+
+ // Floating point case.
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ Label not_floats;
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope use_sse2(masm, SSE2);
+
+ // It could be that only SMIs have been seen at either the left
+ // or the right operand. For precise type feedback, patch the IC
+ // again if this changes.
+ // In theory, we would need the same check in the non-SSE2 case,
+ // but since we don't support Crankshaft on such hardware we can
+ // afford not to care about precise type feedback.
+ if (left_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(edx, &not_floats);
+ }
+ if (right_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(eax, &not_floats);
+ }
+ FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
+ if (left_type_ == BinaryOpIC::INT32) {
+ FloatingPointHelper::CheckSSE2OperandIsInt32(
+ masm, &not_floats, xmm0, ecx, ecx, xmm2);
+ }
+ if (right_type_ == BinaryOpIC::INT32) {
+ FloatingPointHelper::CheckSSE2OperandIsInt32(
+ masm, &not_floats, xmm1, ecx, ecx, xmm2);
+ }
+
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ __ ret(0);
+ } else { // SSE2 not available, use FPU.
+ FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
+ FloatingPointHelper::LoadFloatOperands(
+ masm,
+ ecx,
+ FloatingPointHelper::ARGS_IN_REGISTERS);
+ switch (op_) {
+ case Token::ADD: __ faddp(1); break;
+ case Token::SUB: __ fsubp(1); break;
+ case Token::MUL: __ fmulp(1); break;
+ case Token::DIV: __ fdivp(1); break;
+ default: UNREACHABLE();
+ }
+ Label after_alloc_failure;
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, &after_alloc_failure, mode_);
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ ret(0);
+ __ bind(&after_alloc_failure);
+ __ fstp(0); // Pop FPU stack before calling runtime.
+ __ jmp(&call_runtime);
+ }
+
+ __ bind(&not_floats);
+ GenerateTypeTransition(masm);
+ break;
+ }
+
+ case Token::MOD: {
+ // For MOD we go directly to runtime in the non-smi case.
+ break;
+ }
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR: {
+ GenerateRegisterArgsPush(masm);
+ Label not_floats;
+ Label non_smi_result;
+ // We do not check the input arguments here, as any value is
+ // unconditionally truncated to an int32 anyway. To get the
+ // right optimized code, int32 type feedback is just right.
+ bool use_sse3 = platform_specific_bit_;
+ FloatingPointHelper::LoadUnknownsAsIntegers(
+ masm, use_sse3, left_type_, right_type_, &not_floats);
+ switch (op_) {
+ case Token::BIT_OR: __ or_(eax, ecx); break;
+ case Token::BIT_AND: __ and_(eax, ecx); break;
+ case Token::BIT_XOR: __ xor_(eax, ecx); break;
+ case Token::SAR: __ sar_cl(eax); break;
+ case Token::SHL: __ shl_cl(eax); break;
+ case Token::SHR: __ shr_cl(eax); break;
+ default: UNREACHABLE();
+ }
+ if (op_ == Token::SHR) {
+ // Check if result is non-negative and fits in a smi.
+ __ test(eax, Immediate(0xc0000000));
+ __ j(not_zero, &call_runtime);
+ } else {
+ // Check if result fits in a smi.
+ __ cmp(eax, 0xc0000000);
+ __ j(negative, &non_smi_result, Label::kNear);
+ }
+ // Tag smi result and return.
+ __ SmiTag(eax);
+ __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
+
+ // All ops except SHR return a signed int32 that we load in
+ // a HeapNumber.
+ if (op_ != Token::SHR) {
+ __ bind(&non_smi_result);
+ // Allocate a heap number if needed.
+ __ mov(ebx, eax); // ebx: result
+ Label skip_allocation;
+ switch (mode_) {
+ case OVERWRITE_LEFT:
+ case OVERWRITE_RIGHT:
+ // If the operand was an object, we skip the
+ // allocation of a heap number.
+ __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
+ 1 * kPointerSize : 2 * kPointerSize));
+ __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
+ // Fall through!
+ case NO_OVERWRITE:
+ __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+ // Store the result in the HeapNumber and return.
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope use_sse2(masm, SSE2);
+ __ Cvtsi2sd(xmm0, ebx);
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ mov(Operand(esp, 1 * kPointerSize), ebx);
+ __ fild_s(Operand(esp, 1 * kPointerSize));
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ }
+ __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
+ }
+
+ __ bind(&not_floats);
+ GenerateTypeTransitionWithSavedArgs(masm);
+ break;
+ }
+ default: UNREACHABLE(); break;
+ }
+
+ // If an allocation fails, or SHR or MOD hit a hard case,
+ // use the runtime system to get the correct result.
+ __ bind(&call_runtime);
+
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD:
+ break;
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR:
+ BinaryOpStub_GenerateRegisterArgsPop(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(edx);
+ __ push(eax);
+ GenerateCallRuntime(masm);
+ }
+ __ ret(0);
+}
+
+
+void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+ Label call_runtime;
+
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->generic_binary_stub_calls(), 1);
+
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ break;
+ case Token::MOD:
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR:
+ GenerateRegisterArgsPush(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
+
+ // Floating point case.
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ Label not_floats;
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope use_sse2(masm, SSE2);
+ FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
+
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ __ ret(0);
+ } else { // SSE2 not available, use FPU.
+ FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
+ FloatingPointHelper::LoadFloatOperands(
+ masm,
+ ecx,
+ FloatingPointHelper::ARGS_IN_REGISTERS);
+ switch (op_) {
+ case Token::ADD: __ faddp(1); break;
+ case Token::SUB: __ fsubp(1); break;
+ case Token::MUL: __ fmulp(1); break;
+ case Token::DIV: __ fdivp(1); break;
+ default: UNREACHABLE();
+ }
+ Label after_alloc_failure;
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, &after_alloc_failure, mode_);
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ ret(0);
+ __ bind(&after_alloc_failure);
+ __ fstp(0); // Pop FPU stack before calling runtime.
+ __ jmp(&call_runtime);
+ }
+ __ bind(&not_floats);
+ break;
+ }
+ case Token::MOD: {
+ // For MOD we go directly to runtime in the non-smi case.
+ break;
+ }
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR: {
+ Label non_smi_result;
+ bool use_sse3 = platform_specific_bit_;
+ FloatingPointHelper::LoadUnknownsAsIntegers(masm,
+ use_sse3,
+ BinaryOpIC::GENERIC,
+ BinaryOpIC::GENERIC,
+ &call_runtime);
+ switch (op_) {
+ case Token::BIT_OR: __ or_(eax, ecx); break;
+ case Token::BIT_AND: __ and_(eax, ecx); break;
+ case Token::BIT_XOR: __ xor_(eax, ecx); break;
+ case Token::SAR: __ sar_cl(eax); break;
+ case Token::SHL: __ shl_cl(eax); break;
+ case Token::SHR: __ shr_cl(eax); break;
+ default: UNREACHABLE();
+ }
+ if (op_ == Token::SHR) {
+ // Check if result is non-negative and fits in a smi.
+ __ test(eax, Immediate(0xc0000000));
+ __ j(not_zero, &call_runtime);
+ } else {
+ // Check if result fits in a smi.
+ __ cmp(eax, 0xc0000000);
+ __ j(negative, &non_smi_result, Label::kNear);
+ }
+ // Tag smi result and return.
+ __ SmiTag(eax);
+ __ ret(2 * kPointerSize); // Drop the arguments from the stack.
+
+ // All ops except SHR return a signed int32 that we load in
+ // a HeapNumber.
+ if (op_ != Token::SHR) {
+ __ bind(&non_smi_result);
+ // Allocate a heap number if needed.
+ __ mov(ebx, eax); // ebx: result
+ Label skip_allocation;
+ switch (mode_) {
+ case OVERWRITE_LEFT:
+ case OVERWRITE_RIGHT:
+ // If the operand was an object, we skip the
+ // allocation of a heap number.
+ __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
+ 1 * kPointerSize : 2 * kPointerSize));
+ __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
+ // Fall through!
+ case NO_OVERWRITE:
+ __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+ // Store the result in the HeapNumber and return.
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope use_sse2(masm, SSE2);
+ __ Cvtsi2sd(xmm0, ebx);
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ mov(Operand(esp, 1 * kPointerSize), ebx);
+ __ fild_s(Operand(esp, 1 * kPointerSize));
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ }
+ __ ret(2 * kPointerSize);
+ }
+ break;
+ }
+ default: UNREACHABLE(); break;
+ }
+
+ // If all else fails, use the runtime system to get the correct
+ // result.
+ __ bind(&call_runtime);
+ switch (op_) {
+ case Token::ADD:
+ GenerateAddStrings(masm);
+ // Fall through.
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ break;
+ case Token::MOD:
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR:
+ BinaryOpStub_GenerateRegisterArgsPop(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(edx);
+ __ push(eax);
+ GenerateCallRuntime(masm);
+ }
+ __ ret(0);
+}
+
+
+void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
+ ASSERT(op_ == Token::ADD);
+ Label left_not_string, call_runtime;
+
+ // Registers containing left and right operands respectively.
+ Register left = edx;
+ Register right = eax;
+
+ // Test if left operand is a string.
+ __ JumpIfSmi(left, &left_not_string, Label::kNear);
+ __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
+ __ j(above_equal, &left_not_string, Label::kNear);
+
+ StringAddStub string_add_left_stub(
+ (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_left_stub);
+
+ // Left operand is not a string, test right.
+ __ bind(&left_not_string);
+ __ JumpIfSmi(right, &call_runtime, Label::kNear);
+ __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
+ __ j(above_equal, &call_runtime, Label::kNear);
+
+ StringAddStub string_add_right_stub(
+ (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_right_stub);
+
+ // Neither argument is a string.
+ __ bind(&call_runtime);
+}
+
+
+static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Label* alloc_failure,
+ OverwriteMode mode) {
+ Label skip_allocation;
+ switch (mode) {
+ case OVERWRITE_LEFT: {
+ // If the argument in edx is already an object, we skip the
+ // allocation of a heap number.
+ __ JumpIfNotSmi(edx, &skip_allocation, Label::kNear);
+ // Allocate a heap number for the result. Keep eax and edx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
+ // Now edx can be overwritten, losing one of the arguments, as we are
+ // done with it and will not need it any more.
+ __ mov(edx, ebx);
+ __ bind(&skip_allocation);
+ // Use the object in edx as the result holder.
+ __ mov(eax, edx);
+ break;
+ }
+ case OVERWRITE_RIGHT:
+ // If the argument in eax is already an object, we skip the
+ // allocation of a heap number.
+ __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
+ // Fall through!
+ case NO_OVERWRITE:
+ // Allocate a heap number for the result. Keep eax and edx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
+ // Now eax can be overwritten, losing one of the arguments, as we are
+ // done with it and will not need it any more.
+ __ mov(eax, ebx);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+}
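+
+// Overwrite modes in brief (a sketch): for x = a + b, OVERWRITE_LEFT may
+// reuse a's heap number for the result, OVERWRITE_RIGHT may reuse b's,
+// and NO_OVERWRITE always allocates; the mode is the OverwriteMode
+// encoded in the stub's minor key.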
+
+
+void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+ __ pop(ecx);
+ __ push(edx);
+ __ push(eax);
+ __ push(ecx);
+}
+
+
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// TAGGED case:
// Input:
}
+// Input: edx, eax are the left and right objects of a bit op.
+// Output: eax, ecx are left and right integers for a bit op.
+// Warning: can clobber inputs even when it jumps to |conversion_failure|!
+void FloatingPointHelper::LoadUnknownsAsIntegers(
+ MacroAssembler* masm,
+ bool use_sse3,
+ BinaryOpIC::TypeInfo left_type,
+ BinaryOpIC::TypeInfo right_type,
+ Label* conversion_failure) {
+ // Check float operands.
+ Label arg1_is_object, check_undefined_arg1;
+ Label arg2_is_object, check_undefined_arg2;
+ Label load_arg2, done;
+
+ // Test if arg1 is a Smi.
+ if (left_type == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(edx, conversion_failure);
+ } else {
+ __ JumpIfNotSmi(edx, &arg1_is_object, Label::kNear);
+ }
+
+ __ SmiUntag(edx);
+ __ jmp(&load_arg2);
+
+ // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg1);
+ Factory* factory = masm->isolate()->factory();
+ __ cmp(edx, factory->undefined_value());
+ __ j(not_equal, conversion_failure);
+ __ mov(edx, Immediate(0));
+ __ jmp(&load_arg2);
+
+ __ bind(&arg1_is_object);
+ __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ cmp(ebx, factory->heap_number_map());
+ __ j(not_equal, &check_undefined_arg1);
+
+ __ TruncateHeapNumberToI(edx, edx);
+
+ // Here edx has the untagged integer, eax has a Smi or a heap number.
+ __ bind(&load_arg2);
+
+ // Test if arg2 is a Smi.
+ if (right_type == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(eax, conversion_failure);
+ } else {
+ __ JumpIfNotSmi(eax, &arg2_is_object, Label::kNear);
+ }
+
+ __ SmiUntag(eax);
+ __ mov(ecx, eax);
+ __ jmp(&done);
+
+ // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg2);
+ __ cmp(eax, factory->undefined_value());
+ __ j(not_equal, conversion_failure);
+ __ mov(ecx, Immediate(0));
+ __ jmp(&done);
+
+ __ bind(&arg2_is_object);
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(ebx, factory->heap_number_map());
+ __ j(not_equal, &check_undefined_arg2);
+ // Get the untagged integer version of the eax heap number in ecx.
+
+ __ TruncateHeapNumberToI(ecx, eax);
+
+ __ bind(&done);
+ __ mov(eax, edx);
+}
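+
+// JS-level examples of the conversions above: a smi is used as-is,
+// undefined converts to 0 (so undefined | 0 is 0), and a heap number
+// such as 3.7 truncates toward zero to 3 via TruncateHeapNumberToI.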
+
+
void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
Register number) {
Label load_smi, done;
}
+void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
+ Register scratch) {
+ const Register left = edx;
+ const Register right = eax;
+ __ mov(scratch, left);
+ ASSERT(!scratch.is(right)); // We're about to clobber scratch.
+ __ SmiUntag(scratch);
+ __ Cvtsi2sd(xmm0, scratch);
+
+ __ mov(scratch, right);
+ __ SmiUntag(scratch);
+ __ Cvtsi2sd(xmm1, scratch);
+}
+
+
+void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm,
+ Label* non_int32,
+ XMMRegister operand,
+ Register int32_result,
+ Register scratch,
+ XMMRegister xmm_scratch) {
+ __ cvttsd2si(int32_result, Operand(operand));
+ __ Cvtsi2sd(xmm_scratch, int32_result);
+ __ pcmpeqd(xmm_scratch, operand);
+ __ movmskps(scratch, xmm_scratch);
+ // Two least significant bits should be both set.
+ __ not_(scratch);
+ __ test(scratch, Immediate(3));
+ __ j(not_zero, non_int32);
+}
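+
+// The test above is the usual convert-and-convert-back round trip: take
+// i = truncate-to-int32(d) and accept only if converting i back yields a
+// double bit-identical to d. Fractions, NaN, out-of-range values and
+// -0.0 all fail and jump to |non_int32|.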
+
+
+void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
+ Register scratch,
+ ArgLocation arg_location) {
+ Label load_smi_1, load_smi_2, done_load_1, done;
+ if (arg_location == ARGS_IN_REGISTERS) {
+ __ mov(scratch, edx);
+ } else {
+ __ mov(scratch, Operand(esp, 2 * kPointerSize));
+ }
+ __ JumpIfSmi(scratch, &load_smi_1, Label::kNear);
+ __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
+ __ bind(&done_load_1);
+
+ if (arg_location == ARGS_IN_REGISTERS) {
+ __ mov(scratch, eax);
+ } else {
+ __ mov(scratch, Operand(esp, 1 * kPointerSize));
+ }
+ __ JumpIfSmi(scratch, &load_smi_2, Label::kNear);
+ __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&load_smi_1);
+ __ SmiUntag(scratch);
+ __ push(scratch);
+ __ fild_s(Operand(esp, 0));
+ __ pop(scratch);
+ __ jmp(&done_load_1);
+
+ __ bind(&load_smi_2);
+ __ SmiUntag(scratch);
+ __ push(scratch);
+ __ fild_s(Operand(esp, 0));
+ __ pop(scratch);
+
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
+ Register scratch) {
+ const Register left = edx;
+ const Register right = eax;
+ __ mov(scratch, left);
+ ASSERT(!scratch.is(right)); // We're about to clobber scratch.
+ __ SmiUntag(scratch);
+ __ push(scratch);
+ __ fild_s(Operand(esp, 0));
+
+ __ mov(scratch, right);
+ __ SmiUntag(scratch);
+ __ mov(Operand(esp, 0), scratch);
+ __ fild_s(Operand(esp, 0));
+ __ pop(scratch);
+}
+
+
void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
Label* non_float,
Register scratch) {
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
- PlatformFeatureScope sse2(SSE2);
- BinaryOpStub::GenerateAheadOfTime(isolate);
}
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
- if (!CpuFeatures::IsSupported(SSE2)) return;
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
Label::Distance dst) {
Label done;
sub(esp, Immediate(kPointerSize));
- fld(0);
fist_s(MemOperand(esp, 0));
+ fld(0);
fild_s(MemOperand(esp, 0));
pop(result_reg);
FCmp();
void MacroAssembler::LoadUint32(XMMRegister dst,
Register src,
XMMRegister scratch) {
- ASSERT(!Serializer::enabled());
Label done;
cmp(src, Immediate(0));
movdbl(scratch,
}
+void BinaryOpIC::patch(Code* code) {
+ set_target(code);
+}
+
+
const char* BinaryOpIC::GetName(TypeInfo type_info) {
switch (type_info) {
case UNINITIALIZED: return "Uninitialized";
}
-MaybeObject* BinaryOpIC::Transition(Handle<Object> left, Handle<Object> right) {
- Code::ExtraICState extra_ic_state = target()->extended_extra_ic_state();
- BinaryOpStub stub(extra_ic_state);
-
- bool smi_was_enabled = stub.GetLeftType(isolate())->Maybe(Type::Smi()) &&
- stub.GetRightType(isolate())->Maybe(Type::Smi());
+BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) {
+ switch (type_info) {
+ case UNINITIALIZED:
+ return ::v8::internal::UNINITIALIZED;
+ case SMI:
+ case INT32:
+ case NUMBER:
+ case ODDBALL:
+ case STRING:
+ return MONOMORPHIC;
+ case GENERIC:
+ return ::v8::internal::GENERIC;
+ }
+ UNREACHABLE();
+ return ::v8::internal::UNINITIALIZED;
+}
- Maybe<Handle<Object> > result = stub.Result(left, right, isolate());
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- char buffer[100];
- NoAllocationStringAllocator allocator(buffer,
- static_cast<unsigned>(sizeof(buffer)));
- StringStream stream(&allocator);
- stream.Add("[");
- stub.PrintName(&stream);
-
- stub.UpdateStatus(left, right, result);
-
- stream.Add(" => ");
- stub.PrintState(&stream);
- stream.Add(" ");
- stream.OutputToStdOut();
- PrintF(" @ %p <- ", static_cast<void*>(*stub.GetCode(isolate())));
- JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
- PrintF("]\n");
- } else {
- stub.UpdateStatus(left, right, result);
+Handle<Type> BinaryOpIC::TypeInfoToType(BinaryOpIC::TypeInfo binary_type,
+ Isolate* isolate) {
+ switch (binary_type) {
+ case UNINITIALIZED:
+ return handle(Type::None(), isolate);
+ case SMI:
+ return handle(Type::Smi(), isolate);
+ case INT32:
+ return handle(Type::Signed32(), isolate);
+ case NUMBER:
+ return handle(Type::Number(), isolate);
+ case ODDBALL:
+ return handle(Type::Optional(
+ handle(Type::Union(
+ handle(Type::Number(), isolate),
+ handle(Type::String(), isolate)), isolate)), isolate);
+ case STRING:
+ return handle(Type::String(), isolate);
+ case GENERIC:
+ return handle(Type::Any(), isolate);
}
-#else
- stub.UpdateStatus(left, right, result);
-#endif
-
- Handle<Code> code = stub.GetCode(isolate());
- set_target(*code);
+ UNREACHABLE();
+ return handle(Type::Any(), isolate);
+}
+
+
+void BinaryOpIC::StubInfoToType(int minor_key,
+ Handle<Type>* left,
+ Handle<Type>* right,
+ Handle<Type>* result,
+ Isolate* isolate) {
+ TypeInfo left_typeinfo, right_typeinfo, result_typeinfo;
+ BinaryOpStub::decode_types_from_minor_key(
+ minor_key, &left_typeinfo, &right_typeinfo, &result_typeinfo);
+ *left = TypeInfoToType(left_typeinfo, isolate);
+ *right = TypeInfoToType(right_typeinfo, isolate);
+ *result = TypeInfoToType(result_typeinfo, isolate);
+}
+
+
+static BinaryOpIC::TypeInfo TypeInfoFromValue(Handle<Object> value,
+ Token::Value op) {
+ v8::internal::TypeInfo type = v8::internal::TypeInfo::FromValue(value);
+ if (type.IsSmi()) return BinaryOpIC::SMI;
+ if (type.IsInteger32()) {
+ if (SmiValuesAre32Bits()) return BinaryOpIC::SMI;
+ return BinaryOpIC::INT32;
+ }
+ if (type.IsNumber()) return BinaryOpIC::NUMBER;
+ if (type.IsString()) return BinaryOpIC::STRING;
+ if (value->IsUndefined()) {
+ if (op == Token::BIT_AND ||
+ op == Token::BIT_OR ||
+ op == Token::BIT_XOR ||
+ op == Token::SAR ||
+ op == Token::SHL ||
+ op == Token::SHR) {
+ if (SmiValuesAre32Bits()) return BinaryOpIC::SMI;
+ return BinaryOpIC::INT32;
+ }
+ return BinaryOpIC::ODDBALL;
+ }
+ return BinaryOpIC::GENERIC;
+}
- bool enable_smi = stub.GetLeftType(isolate())->Maybe(Type::Smi()) &&
- stub.GetRightType(isolate())->Maybe(Type::Smi());
- if (!smi_was_enabled && enable_smi) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
- } else if (smi_was_enabled && !enable_smi) {
- PatchInlinedSmiCode(address(), DISABLE_INLINED_SMI_CHECK);
+static BinaryOpIC::TypeInfo InputState(BinaryOpIC::TypeInfo old_type,
+ Handle<Object> value,
+ Token::Value op) {
+ BinaryOpIC::TypeInfo new_type = TypeInfoFromValue(value, op);
+ if (old_type == BinaryOpIC::STRING) {
+ if (new_type == BinaryOpIC::STRING) return new_type;
+ return BinaryOpIC::GENERIC;
}
+ return Max(old_type, new_type);
+}
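+
+// Lattice intuition (a sketch): TypeInfo is ordered UNINITIALIZED < SMI <
+// INT32 < NUMBER < ODDBALL < STRING < GENERIC, so Max() only ever widens.
+// E.g. InputState(SMI, <heap number>, ADD) yields NUMBER, while
+// InputState(STRING, <smi>, ADD) degrades to GENERIC because STRING only
+// combines with STRING.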
- return result.has_value
- ? static_cast<MaybeObject*>(*result.value)
- : Failure::Exception();
+
+#ifdef DEBUG
+static void TraceBinaryOp(BinaryOpIC::TypeInfo left,
+ BinaryOpIC::TypeInfo right,
+ Maybe<int32_t> fixed_right_arg,
+ BinaryOpIC::TypeInfo result) {
+ PrintF("%s*%s", BinaryOpIC::GetName(left), BinaryOpIC::GetName(right));
+ if (fixed_right_arg.has_value) PrintF("{%d}", fixed_right_arg.value);
+ PrintF("->%s", BinaryOpIC::GetName(result));
}
+#endif
+
+RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
+ ASSERT(args.length() == 3);
-RUNTIME_FUNCTION(MaybeObject*, BinaryOpIC_Miss) {
HandleScope scope(isolate);
Handle<Object> left = args.at<Object>(0);
Handle<Object> right = args.at<Object>(1);
- BinaryOpIC ic(isolate);
- return ic.Transition(left, right);
+ int key = args.smi_at(2);
+ Token::Value op = BinaryOpStub::decode_op_from_minor_key(key);
+
+ BinaryOpIC::TypeInfo previous_left, previous_right, previous_result;
+ BinaryOpStub::decode_types_from_minor_key(
+ key, &previous_left, &previous_right, &previous_result);
+
+ BinaryOpIC::TypeInfo new_left = InputState(previous_left, left, op);
+ BinaryOpIC::TypeInfo new_right = InputState(previous_right, right, op);
+ BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED;
+
+ // STRING is only used for ADD operations.
+ if ((new_left == BinaryOpIC::STRING || new_right == BinaryOpIC::STRING) &&
+ op != Token::ADD) {
+ new_left = new_right = BinaryOpIC::GENERIC;
+ }
+
+ BinaryOpIC::TypeInfo new_overall = Max(new_left, new_right);
+ BinaryOpIC::TypeInfo previous_overall = Max(previous_left, previous_right);
+
+ Maybe<int> previous_fixed_right_arg =
+ BinaryOpStub::decode_fixed_right_arg_from_minor_key(key);
+
+ int32_t value;
+ bool new_has_fixed_right_arg =
+ op == Token::MOD &&
+ right->ToInt32(&value) &&
+ BinaryOpStub::can_encode_arg_value(value) &&
+ (previous_overall == BinaryOpIC::UNINITIALIZED ||
+ (previous_fixed_right_arg.has_value &&
+ previous_fixed_right_arg.value == value));
+ Maybe<int32_t> new_fixed_right_arg(
+ new_has_fixed_right_arg, new_has_fixed_right_arg ? value : 1);
+
+ if (previous_fixed_right_arg.has_value == new_fixed_right_arg.has_value) {
+ if (new_overall == BinaryOpIC::SMI && previous_overall == BinaryOpIC::SMI) {
+ if (op == Token::DIV ||
+ op == Token::MUL ||
+ op == Token::SHR ||
+ SmiValuesAre32Bits()) {
+ // Arithmetic on two Smi inputs has yielded a heap number.
+ // That is the only way to get here from the Smi stub.
+ // With 32-bit Smis, all overflows give heap numbers, but with
+ // 31-bit Smis, most operations overflow to int32 results.
+ result_type = BinaryOpIC::NUMBER;
+ } else {
+ // Other operations on SMIs that overflow yield int32s.
+ result_type = BinaryOpIC::INT32;
+ }
+ }
+ if (new_overall == BinaryOpIC::INT32 &&
+ previous_overall == BinaryOpIC::INT32) {
+ if (new_left == previous_left && new_right == previous_right) {
+ result_type = BinaryOpIC::NUMBER;
+ }
+ }
+ }
+
+ BinaryOpStub stub(key, new_left, new_right, result_type, new_fixed_right_arg);
+ Handle<Code> code = stub.GetCode(isolate);
+ if (!code.is_null()) {
+#ifdef DEBUG
+ if (FLAG_trace_ic) {
+ PrintF("[BinaryOpIC in ");
+ JavaScriptFrame::PrintTop(isolate, stdout, false, true);
+ PrintF(" ");
+ TraceBinaryOp(previous_left, previous_right, previous_fixed_right_arg,
+ previous_result);
+ PrintF(" => ");
+ TraceBinaryOp(new_left, new_right, new_fixed_right_arg, result_type);
+ PrintF(" #%s @ %p]\n", Token::Name(op), static_cast<void*>(*code));
+ }
+#endif
+ BinaryOpIC ic(isolate);
+ ic.patch(*code);
+
+ // Activate inlined smi code.
+ if (previous_overall == BinaryOpIC::UNINITIALIZED) {
+ PatchInlinedSmiCode(ic.address(), ENABLE_INLINED_SMI_CHECK);
+ }
+ }
+
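+ // Whether or not the stub was patched, compute the actual result here by
+ // calling the matching JS builtin with 'left' as receiver and 'right' as
+ // its sole argument.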
+ Handle<JSBuiltinsObject> builtins(isolate->js_builtins_object());
+ Object* builtin = NULL; // Initialization calms down the compiler.
+ switch (op) {
+ case Token::ADD:
+ builtin = builtins->javascript_builtin(Builtins::ADD);
+ break;
+ case Token::SUB:
+ builtin = builtins->javascript_builtin(Builtins::SUB);
+ break;
+ case Token::MUL:
+ builtin = builtins->javascript_builtin(Builtins::MUL);
+ break;
+ case Token::DIV:
+ builtin = builtins->javascript_builtin(Builtins::DIV);
+ break;
+ case Token::MOD:
+ builtin = builtins->javascript_builtin(Builtins::MOD);
+ break;
+ case Token::BIT_AND:
+ builtin = builtins->javascript_builtin(Builtins::BIT_AND);
+ break;
+ case Token::BIT_OR:
+ builtin = builtins->javascript_builtin(Builtins::BIT_OR);
+ break;
+ case Token::BIT_XOR:
+ builtin = builtins->javascript_builtin(Builtins::BIT_XOR);
+ break;
+ case Token::SHR:
+ builtin = builtins->javascript_builtin(Builtins::SHR);
+ break;
+ case Token::SAR:
+ builtin = builtins->javascript_builtin(Builtins::SAR);
+ break;
+ case Token::SHL:
+ builtin = builtins->javascript_builtin(Builtins::SHL);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ Handle<JSFunction> builtin_function(JSFunction::cast(builtin), isolate);
+
+ bool caught_exception;
+ Handle<Object> builtin_args[] = { right };
+ Handle<Object> result = Execution::Call(isolate,
+ builtin_function,
+ left,
+ ARRAY_SIZE(builtin_args),
+ builtin_args,
+ &caught_exception);
+ if (caught_exception) {
+ return Failure::Exception();
+ }
+ return *result;
}
}
-Builtins::JavaScript BinaryOpIC::TokenToJSBuiltin(Token::Value op) {
- switch (op) {
- default:
- UNREACHABLE();
- case Token::ADD:
- return Builtins::ADD;
- break;
- case Token::SUB:
- return Builtins::SUB;
- break;
- case Token::MUL:
- return Builtins::MUL;
- break;
- case Token::DIV:
- return Builtins::DIV;
- break;
- case Token::MOD:
- return Builtins::MOD;
- break;
- case Token::BIT_OR:
- return Builtins::BIT_OR;
- break;
- case Token::BIT_AND:
- return Builtins::BIT_AND;
- break;
- case Token::BIT_XOR:
- return Builtins::BIT_XOR;
- break;
- case Token::SAR:
- return Builtins::SAR;
- break;
- case Token::SHR:
- return Builtins::SHR;
- break;
- case Token::SHL:
- return Builtins::SHL;
- break;
- }
-}
-
-
MaybeObject* ToBooleanIC::ToBoolean(Handle<Object> object,
Code::ExtraICState extra_ic_state) {
ToBooleanStub stub(extra_ic_state);
ICU(LoadPropertyWithInterceptorForCall) \
ICU(KeyedLoadPropertyWithInterceptor) \
ICU(StoreInterceptorProperty) \
+ ICU(BinaryOp_Patch) \
ICU(CompareIC_Miss) \
- ICU(BinaryOpIC_Miss) \
ICU(CompareNilIC_Miss) \
ICU(Unreachable) \
ICU(ToBooleanIC_Miss)
GENERIC
};
- explicit BinaryOpIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) { }
+ static void StubInfoToType(int minor_key,
+ Handle<Type>* left,
+ Handle<Type>* right,
+ Handle<Type>* result,
+ Isolate* isolate);
+
+ explicit BinaryOpIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
- static Builtins::JavaScript TokenToJSBuiltin(Token::Value op);
+ void patch(Code* code);
static const char* GetName(TypeInfo type_info);
- MUST_USE_RESULT MaybeObject* Transition(Handle<Object> left,
- Handle<Object> right);
+ static State ToState(TypeInfo type_info);
+
+ private:
+ static Handle<Type> TypeInfoToType(TypeInfo binary_type, Isolate* isolate);
};
DECLARE_RUNTIME_FUNCTION(MaybeObject*, UnaryOpIC_Miss);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreIC_MissFromStubFailure);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, ElementsTransitionAndStoreIC_Miss);
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, BinaryOpIC_Miss);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, CompareNilIC_Miss);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, ToBooleanIC_Miss);
DONT_TRACK_ALLOCATION_SITE, 0);
stub.InitializeInterfaceDescriptor(
this, code_stub_interface_descriptor(CodeStub::FastCloneShallowArray));
- BinaryOpStub::InitializeForIsolate(this);
CompareNilICStub::InitializeForIsolate(this);
ToBooleanStub::InitializeForIsolate(this);
ArrayConstructorStubBase::InstallDescriptors(this);
case Code::FUNCTION:
case Code::OPTIMIZED_FUNCTION:
return; // We log this later using LogCompiledFunctions.
- case Code::BINARY_OP_IC: {
- BinaryOpStub stub(code_object->extended_extra_ic_state());
- description = stub.GetName().Detach();
- tag = Logger::STUB_TAG;
- break;
- }
+ case Code::BINARY_OP_IC: // fall through
case Code::COMPARE_IC: // fall through
case Code::COMPARE_NIL_IC: // fall through
case Code::TO_BOOLEAN_IC: // fall through
// TODO(danno): This is a bit of a hack right now since there are still
// clients of this API that pass "extra" values in for argc. These clients
// should be retrofitted to use ExtendedExtraICState.
- return kind == COMPARE_NIL_IC || kind == TO_BOOLEAN_IC ||
- kind == BINARY_OP_IC;
+ return kind == COMPARE_NIL_IC || kind == TO_BOOLEAN_IC;
}
inline StubType type(); // Only valid for monomorphic IC stubs.
int size = 0;
switch (identity()) {
case OLD_POINTER_SPACE:
- size = 72 * kPointerSize * KB;
+ size = 64 * kPointerSize * KB;
break;
case OLD_DATA_SPACE:
size = 192 * KB;
Handle<Type>* left,
Handle<Type>* right,
Handle<Type>* result,
- Maybe<int>* fixed_right_arg,
- Token::Value operation) {
+ Maybe<int>* fixed_right_arg) {
Handle<Object> object = GetInfo(id);
if (!object->IsCode()) {
- // For some binary ops we don't have ICs, e.g. Token::COMMA, but for the
- // operations covered by the BinaryOpStub we should always have them.
- ASSERT(!(operation >= BinaryOpStub::FIRST_TOKEN &&
- operation <= BinaryOpStub::LAST_TOKEN));
+ // For some binary ops we don't have ICs, e.g. Token::COMMA.
*left = *right = *result = handle(Type::None(), isolate_);
return;
}
Handle<Code> code = Handle<Code>::cast(object);
ASSERT(code->is_binary_op_stub());
- BinaryOpStub stub(code->extended_extra_ic_state());
-
- // Sanity check.
- ASSERT(stub.operation() == operation);
-
- *left = stub.GetLeftType(isolate());
- *right = stub.GetRightType(isolate());
- *result = stub.GetResultType(isolate());
- *fixed_right_arg = stub.fixed_right_arg();
+ int minor_key = code->stub_info();
+ BinaryOpIC::StubInfoToType(minor_key, left, right, result, isolate());
+ *fixed_right_arg =
+ BinaryOpStub::decode_fixed_right_arg_from_minor_key(minor_key);
}
}
-Handle<Type> TypeFeedbackOracle::IncrementType(CountOperation* expr) {
+TypeInfo TypeFeedbackOracle::IncrementType(CountOperation* expr) {
Handle<Object> object = GetInfo(expr->CountBinOpFeedbackId());
- Handle<Type> unknown(Type::None(), isolate_);
+ TypeInfo unknown = TypeInfo::Unknown();
if (!object->IsCode()) return unknown;
Handle<Code> code = Handle<Code>::cast(object);
if (!code->is_binary_op_stub()) return unknown;
- BinaryOpStub stub(code->extended_extra_ic_state());
- return stub.GetLeftType(isolate());
+ BinaryOpIC::TypeInfo left_type, right_type, unused_result_type;
+ BinaryOpStub::decode_types_from_minor_key(code->stub_info(), &left_type,
+ &right_type, &unused_result_type);
+ // CountOperations should always have +1 or -1 as their right input.
+ ASSERT(right_type == BinaryOpIC::SMI ||
+ right_type == BinaryOpIC::UNINITIALIZED);
+
+ switch (left_type) {
+ case BinaryOpIC::UNINITIALIZED:
+ case BinaryOpIC::SMI:
+ return TypeInfo::Smi();
+ case BinaryOpIC::INT32:
+ return TypeInfo::Integer32();
+ case BinaryOpIC::NUMBER:
+ return TypeInfo::Double();
+ case BinaryOpIC::STRING:
+ case BinaryOpIC::ODDBALL:
+ case BinaryOpIC::GENERIC:
+ return unknown;
+ }
+ UNREACHABLE();
+ return unknown;
}
Handle<Type>* left,
Handle<Type>* right,
Handle<Type>* result,
- Maybe<int>* fixed_right_arg,
- Token::Value operation);
+ Maybe<int>* fixed_right_arg);
void CompareType(TypeFeedbackId id,
Handle<Type>* left,
Handle<Type> ClauseType(TypeFeedbackId id);
- Handle<Type> IncrementType(CountOperation* expr);
+ TypeInfo IncrementType(CountOperation* expr);
Zone* zone() const { return zone_; }
Isolate* isolate() const { return isolate_; }
Handle<Type> type, left_type, right_type;
Maybe<int> fixed_right_arg;
oracle()->BinaryType(expr->BinaryOperationFeedbackId(),
- &left_type, &right_type, &type, &fixed_right_arg, expr->op());
+ &left_type, &right_type, &type, &fixed_right_arg);
NarrowLowerType(expr, type);
NarrowLowerType(expr->left(), left_type);
NarrowLowerType(expr->right(), right_type);
SC(enum_cache_hits, V8.EnumCacheHits) \
SC(enum_cache_misses, V8.EnumCacheMisses) \
SC(zone_segment_bytes, V8.ZoneSegmentBytes) \
+ SC(generic_binary_stub_calls, V8.GenericBinaryStubCalls) \
SC(fast_new_closure_total, V8.FastNewClosureTotal) \
SC(fast_new_closure_try_optimized, V8.FastNewClosureTryOptimized) \
SC(fast_new_closure_install_optimized, V8.FastNewClosureInstallOptimized) \
}
-void BinaryOpStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { rdx, rax };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
- descriptor->SetMissHandler(
- ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
-}
-
-
static void InitializeArrayConstructorDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor,
// If the operands are not both numbers, jump to not_numbers.
// Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
// NumberOperands assumes both are smis or heap numbers.
+ static void LoadSSE2SmiOperands(MacroAssembler* masm);
static void LoadSSE2UnknownOperands(MacroAssembler* masm,
Label* not_numbers);
+
+ // Takes the operands in rdx and rax and loads them as integers in rax
+ // and rcx.
+ static void LoadAsIntegers(MacroAssembler* masm,
+ Label* operand_conversion_failure,
+ Register heap_number_map);
+
+ // Tries to convert two values to smis losslessly.
+ // This fails if either argument is neither a Smi nor a HeapNumber, or if
+ // it is a HeapNumber whose value cannot be converted losslessly to a
+ // Smi. In that case, control transitions to the on_not_smis label.
+ // On success, control either goes to the on_success label (if one is
+ // provided) or falls through at the end of the code (if on_success is
+ // NULL).
+ // On success, both first and second hold Smi-tagged values.
+ // At least one of first and second must be a non-Smi on entry.
+ static void NumbersToSmis(MacroAssembler* masm,
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* on_success,
+ Label* on_not_smis,
+ ConvertUndefined convert_undefined);
};
}
+void BinaryOpStub::Initialize() {}
+
+
+void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ __ PopReturnAddressTo(rcx);
+ __ push(rdx);
+ __ push(rax);
+ // Left and right arguments are now on top.
+ __ Push(Smi::FromInt(MinorKey()));
+
+ __ PushReturnAddressFrom(rcx);
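+ // The stack now holds, from the top: return address, Smi-encoded
+ // MinorKey, right operand (was rax), left operand (was rdx), which
+ // BinaryOp_Patch reads back as args (left, right, key).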
+
+ // Patch the caller to an appropriate specialized stub and return the
+ // operation result to the caller of the stub.
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
+ masm->isolate()),
+ 3,
+ 1);
+}
+
+
+static void BinaryOpStub_GenerateSmiCode(
+ MacroAssembler* masm,
+ Label* slow,
+ BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
+ Token::Value op) {
+ // Arguments to BinaryOpStub are in rdx and rax.
+ const Register left = rdx;
+ const Register right = rax;
+
+ // We only generate heapnumber answers for overflowing calculations
+ // for the four basic arithmetic operations and logical right shift by 0.
+ bool generate_inline_heapnumber_results =
+ (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) &&
+ (op == Token::ADD || op == Token::SUB ||
+ op == Token::MUL || op == Token::DIV || op == Token::SHR);
+
+ // Smi check of both operands. If op is BIT_OR, the check is delayed
+ // until after the OR operation.
+ Label not_smis;
+ Label use_fp_on_smis;
+ Label fail;
+
+ if (op != Token::BIT_OR) {
+ Comment smi_check_comment(masm, "-- Smi check arguments");
+ __ JumpIfNotBothSmi(left, right, ¬_smis);
+ }
+
+ Label smi_values;
+ __ bind(&smi_values);
+ // Perform the operation.
+ Comment perform_smi(masm, "-- Perform smi operation");
+ switch (op) {
+ case Token::ADD:
+ ASSERT(right.is(rax));
+ __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
+ break;
+
+ case Token::SUB:
+ __ SmiSub(left, left, right, &use_fp_on_smis);
+ __ movq(rax, left);
+ break;
+
+ case Token::MUL:
+ ASSERT(right.is(rax));
+ __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
+ break;
+
+ case Token::DIV:
+ // SmiDiv will not accept left in rdx or right in rax.
+ __ movq(rbx, rax);
+ __ movq(rcx, rdx);
+ __ SmiDiv(rax, rcx, rbx, &use_fp_on_smis);
+ break;
+
+ case Token::MOD:
+ // SmiMod will not accept left in rdx or right in rax.
+ __ movq(rbx, rax);
+ __ movq(rcx, rdx);
+ __ SmiMod(rax, rcx, rbx, &use_fp_on_smis);
+ break;
+
+ case Token::BIT_OR: {
+ ASSERT(right.is(rax));
+ __ SmiOrIfSmis(right, right, left, ¬_smis); // BIT_OR is commutative.
+ break;
+ }
+ case Token::BIT_XOR:
+ ASSERT(right.is(rax));
+ __ SmiXor(right, right, left); // BIT_XOR is commutative.
+ break;
+
+ case Token::BIT_AND:
+ ASSERT(right.is(rax));
+ __ SmiAnd(right, right, left); // BIT_AND is commutative.
+ break;
+
+ case Token::SHL:
+ __ SmiShiftLeft(left, left, right);
+ __ movq(rax, left);
+ break;
+
+ case Token::SAR:
+ __ SmiShiftArithmeticRight(left, left, right);
+ __ movq(rax, left);
+ break;
+
+ case Token::SHR:
+ __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis);
+ __ movq(rax, left);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+
+ // Emit return of result in rax. Some operations have registers pushed.
+ __ ret(0);
+
+ if (use_fp_on_smis.is_linked()) {
+ // For some operations emit inline code to perform floating point
+ // operations on known smis (e.g., if the result of the operation
+ // overflowed the smi range).
+ __ bind(&use_fp_on_smis);
+ if (op == Token::DIV || op == Token::MOD) {
+ // Restore left and right to rdx and rax.
+ __ movq(rdx, rcx);
+ __ movq(rax, rbx);
+ }
+
+ if (generate_inline_heapnumber_results) {
+ __ AllocateHeapNumber(rcx, rbx, slow);
+ Comment perform_float(masm, "-- Perform float operation on smis");
+ if (op == Token::SHR) {
+ __ SmiToInteger32(left, left);
+ __ cvtqsi2sd(xmm0, left);
+ } else {
+ FloatingPointHelper::LoadSSE2SmiOperands(masm);
+ switch (op) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ }
+ __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
+ __ movq(rax, rcx);
+ __ ret(0);
+ } else {
+ __ jmp(&fail);
+ }
+ }
+
+ // Non-smi operands reach the end of the code generated by
+ // GenerateSmiCode, and fall through to subsequent code,
+ // with the operands in rdx and rax.
+ // But first we check if non-smi values are HeapNumbers holding
+ // values that could be smi.
+ __ bind(¬_smis);
+ Comment done_comment(masm, "-- Enter non-smi code");
+ FloatingPointHelper::ConvertUndefined convert_undefined =
+ FloatingPointHelper::BAILOUT_ON_UNDEFINED;
+ // This list must be kept in sync with the BinaryOp_Patch behavior in ic.cc.
+ if (op == Token::BIT_AND ||
+ op == Token::BIT_OR ||
+ op == Token::BIT_XOR ||
+ op == Token::SAR ||
+ op == Token::SHL ||
+ op == Token::SHR) {
+ convert_undefined = FloatingPointHelper::CONVERT_UNDEFINED_TO_ZERO;
+ }
+ FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx,
+ &smi_values, &fail, convert_undefined);
+ __ jmp(&smi_values);
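+ // (On success NumbersToSmis jumps back to &smi_values, so the freshly
+ // retagged operands retry the fast smi path above.)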
+ __ bind(&fail);
+}
+
+
+static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Label* alloc_failure,
+ OverwriteMode mode);
+
+
+static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm,
+ Label* allocation_failure,
+ Label* non_numeric_failure,
+ Token::Value op,
+ OverwriteMode mode) {
+ switch (op) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);
+
+ switch (op) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, allocation_failure, mode);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+ __ ret(0);
+ break;
+ }
+ case Token::MOD: {
+ // For MOD we jump to the allocation_failure label, to call runtime.
+ __ jmp(allocation_failure);
+ break;
+ }
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR: {
+ Label non_smi_shr_result;
+ Register heap_number_map = r9;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
+ heap_number_map);
+ switch (op) {
+ case Token::BIT_OR: __ orl(rax, rcx); break;
+ case Token::BIT_AND: __ andl(rax, rcx); break;
+ case Token::BIT_XOR: __ xorl(rax, rcx); break;
+ case Token::SAR: __ sarl_cl(rax); break;
+ case Token::SHL: __ shll_cl(rax); break;
+ case Token::SHR: {
+ __ shrl_cl(rax);
+ // Check if result is negative. This can only happen for a shift
+ // by zero.
+ __ testl(rax, rax);
+ __ j(negative, &non_smi_shr_result);
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ STATIC_ASSERT(kSmiValueSize == 32);
+ // Tag smi result and return.
+ __ Integer32ToSmi(rax, rax);
+ __ Ret();
+
+ // Logical shift right can produce an unsigned int32 that is not
+ // an int32, and so is not in the smi range. Allocate a heap number
+ // in that case.
+ if (op == Token::SHR) {
+ __ bind(&non_smi_shr_result);
+ Label allocation_failed;
+ __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
+ // Allocate heap number in new space.
+ // Not using AllocateHeapNumber macro in order to reuse
+ // already loaded heap_number_map.
+ __ Allocate(HeapNumber::kSize, rax, rdx, no_reg, &allocation_failed,
+ TAG_OBJECT);
+ // Set the map.
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ kHeapNumberMapRegisterClobbered);
+ __ movq(FieldOperand(rax, HeapObject::kMapOffset),
+ heap_number_map);
+ __ cvtqsi2sd(xmm0, rbx);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+ __ Ret();
+
+ __ bind(&allocation_failed);
+ // We need tagged values in rdx and rax for the following code,
+ // not int32 in rax and rcx.
+ __ Integer32ToSmi(rax, rcx);
+ __ Integer32ToSmi(rdx, rbx);
+ __ jmp(allocation_failure);
+ }
+ break;
+ }
+ default: UNREACHABLE(); break;
+ }
+ // No fall-through from this generated code.
+ if (FLAG_debug_code) {
+ __ Abort(kUnexpectedFallThroughInBinaryStubGenerateFloatingPointCode);
+ }
+}
+
+
+static void BinaryOpStub_GenerateRegisterArgsPushUnderReturn(
+ MacroAssembler* masm) {
+ // Push arguments, but ensure they are under the return address
+ // for a tail call.
+ __ PopReturnAddressTo(rcx);
+ __ push(rdx);
+ __ push(rax);
+ __ PushReturnAddressFrom(rcx);
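+ // For illustration: a stack holding just [ret] on entry leaves as
+ // [ret, right (rax), left (rdx)] from the top, so the tail-called stub
+ // finds both operands directly under its return address.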
+}
+
+
+void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
+ ASSERT(op_ == Token::ADD);
+ Label left_not_string, call_runtime;
+
+ // Registers containing left and right operands respectively.
+ Register left = rdx;
+ Register right = rax;
+
+ // Test if left operand is a string.
+ __ JumpIfSmi(left, &left_not_string, Label::kNear);
+ __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
+ __ j(above_equal, &left_not_string, Label::kNear);
+ StringAddStub string_add_left_stub(
+ (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
+ BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
+ __ TailCallStub(&string_add_left_stub);
+
+ // Left operand is not a string, test right.
+ __ bind(&left_not_string);
+ __ JumpIfSmi(right, &call_runtime, Label::kNear);
+ __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
+ __ j(above_equal, &call_runtime, Label::kNear);
+
+ StringAddStub string_add_right_stub(
+ (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
+ BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
+ __ TailCallStub(&string_add_right_stub);
+
+ // Neither argument is a string.
+ __ bind(&call_runtime);
+}
+
+
+void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+ Label right_arg_changed, call_runtime;
+
+ if (op_ == Token::MOD && encoded_right_arg_.has_value) {
+ // The value is guaranteed to fit into a Smi: if it did not, we would not
+ // be here (see BinaryOp_Patch).
+ __ Cmp(rax, Smi::FromInt(fixed_right_arg_value()));
+ __ j(not_equal, &right_arg_changed);
+ }
+
+ if (result_type_ == BinaryOpIC::UNINITIALIZED ||
+ result_type_ == BinaryOpIC::SMI) {
+ // Only allow smi results.
+ BinaryOpStub_GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS, op_);
+ } else {
+ // Allow heap number result and don't make a transition if a heap number
+ // cannot be allocated.
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
+ }
+
+ // Code falls through here if the result could not be returned as either
+ // a smi or a heap number above; request a type transition.
+ __ bind(&right_arg_changed);
+ GenerateTypeTransition(masm);
+
+ if (call_runtime.is_linked()) {
+ __ bind(&call_runtime);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
+ }
+ __ Ret();
+ }
+}
+
+
+void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+ // The int32 case is identical to the Smi case. We avoid creating this
+ // IC state on x64.
+ UNREACHABLE();
+}
+
+
+void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
+ Label call_runtime;
+ ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
+ ASSERT(op_ == Token::ADD);
+ // If both arguments are strings, call the string add stub.
+ // Otherwise, do a transition.
+
+ // Registers containing left and right operands respectively.
+ Register left = rdx;
+ Register right = rax;
+
+ // Test if left operand is a string.
+ __ JumpIfSmi(left, &call_runtime);
+ __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
+ __ j(above_equal, &call_runtime);
+
+ // Test if right operand is a string.
+ __ JumpIfSmi(right, &call_runtime);
+ __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
+ __ j(above_equal, &call_runtime);
+
+ StringAddStub string_add_stub(
+ (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
+ BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
+ __ TailCallStub(&string_add_stub);
+
+ __ bind(&call_runtime);
+ GenerateTypeTransition(masm);
+}
+
+
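+// Oddball inputs follow ES5 ToNumber: undefined becomes NaN for arithmetic
+// ops, and 0 for bit ops (since ToInt32(NaN) == 0).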
+void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
+ Label call_runtime;
+
+ if (op_ == Token::ADD) {
+ // Handle string addition here, because it is the only operation
+ // that does not do a ToNumber conversion on the operands.
+ GenerateAddStrings(masm);
+ }
+
+ // Convert oddball arguments to numbers.
+ Label check, done;
+ __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &check, Label::kNear);
+ if (Token::IsBitOp(op_)) {
+ __ xor_(rdx, rdx);
+ } else {
+ __ LoadRoot(rdx, Heap::kNanValueRootIndex);
+ }
+ __ jmp(&done, Label::kNear);
+ __ bind(&check);
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &done, Label::kNear);
+ if (Token::IsBitOp(op_)) {
+ __ xor_(rax, rax);
+ } else {
+ __ LoadRoot(rax, Heap::kNanValueRootIndex);
+ }
+ __ bind(&done);
+
+ GenerateNumberStub(masm);
+}
+
+
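+// Jumps to 'fail' unless 'input' is a smi or a heap number whose value
+// survives a double -> int64 -> double round trip bit-exactly; the bit
+// comparison also rejects -0.0, which round-trips to +0.0.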
+static void BinaryOpStub_CheckSmiInput(MacroAssembler* masm,
+ Register input,
+ Label* fail) {
+ Label ok;
+ __ JumpIfSmi(input, &ok, Label::kNear);
+ Register heap_number_map = r8;
+ Register scratch1 = r9;
+ Register scratch2 = r10;
+ // HeapNumbers containing 32-bit integer values are also allowed.
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmpq(FieldOperand(input, HeapObject::kMapOffset), heap_number_map);
+ __ j(not_equal, fail);
+ __ movsd(xmm0, FieldOperand(input, HeapNumber::kValueOffset));
+ // Convert, convert back, and compare the two doubles' bits.
+ __ cvttsd2siq(scratch2, xmm0);
+ __ Cvtlsi2sd(xmm1, scratch2);
+ __ movq(scratch1, xmm0);
+ __ movq(scratch2, xmm1);
+ __ cmpq(scratch1, scratch2);
+ __ j(not_equal, fail);
+ __ bind(&ok);
+}
+
+
+void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
+ Label gc_required, not_number;
+
+ // It could be that only SMIs have been seen at either the left
+ // or the right operand. For precise type feedback, patch the IC
+ // again if this changes.
+ if (left_type_ == BinaryOpIC::SMI) {
+ BinaryOpStub_CheckSmiInput(masm, rdx, ¬_number);
+ }
+ if (right_type_ == BinaryOpIC::SMI) {
+ BinaryOpStub_CheckSmiInput(masm, rax, ¬_number);
+ }
+
+ BinaryOpStub_GenerateFloatingPointCode(
+ masm, &gc_required, ¬_number, op_, mode_);
+
+ __ bind(¬_number);
+ GenerateTypeTransition(masm);
+
+ __ bind(&gc_required);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
+ }
+ __ Ret();
+}
+
+
+void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+ Label call_runtime, call_string_add_or_runtime;
+
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
+
+ BinaryOpStub_GenerateFloatingPointCode(
+ masm, &call_runtime, &call_string_add_or_runtime, op_, mode_);
+
+ __ bind(&call_string_add_or_runtime);
+ if (op_ == Token::ADD) {
+ GenerateAddStrings(masm);
+ }
+
+ __ bind(&call_runtime);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
+ }
+ __ Ret();
+}
+
+
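+// Returns the result heap number in rax. Under OVERWRITE_LEFT/RIGHT an
+// operand that is already a heap object is reused in place; smi operands
+// always force a fresh allocation, jumping to alloc_failure on failure.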
+static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Label* alloc_failure,
+ OverwriteMode mode) {
+ Label skip_allocation;
+ switch (mode) {
+ case OVERWRITE_LEFT: {
+ // If the argument in rdx is already an object, we skip the
+ // allocation of a heap number.
+ __ JumpIfNotSmi(rdx, &skip_allocation);
+ // Allocate a heap number for the result. Keep rax and rdx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(rbx, rcx, alloc_failure);
+ // Now rdx can be overwritten, losing one of the arguments, as we are
+ // done and will not need it any more.
+ __ movq(rdx, rbx);
+ __ bind(&skip_allocation);
+ // Use the object in rdx as the result holder.
+ __ movq(rax, rdx);
+ break;
+ }
+ case OVERWRITE_RIGHT:
+ // If the argument in rax is already an object, we skip the
+ // allocation of a heap number.
+ __ JumpIfNotSmi(rax, &skip_allocation);
+ // Fall through!
+ case NO_OVERWRITE:
+ // Allocate a heap number for the result. Keep rax and rdx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(rbx, rcx, alloc_failure);
+ // Now rax can be overwritten, losing one of the arguments, as we are
+ // done and will not need it any more.
+ __ movq(rax, rbx);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+}
+
+
+void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+ __ push(rdx);
+ __ push(rax);
+}
+
+
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// TAGGED case:
// Input:
}
+// Input: rdx, rax are the left and right objects of a bit op.
+// Output: rax, rcx are left and right integers for a bit op.
+// Jump to conversion_failure: rdx and rax are unchanged.
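+// Heap numbers are truncated with ECMA-262 ToInt32 semantics (modulo 2^32)
+// via TruncateHeapNumberToI; undefined converts to zero.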
+void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
+ Label* conversion_failure,
+ Register heap_number_map) {
+ // Check float operands.
+ Label arg1_is_object, check_undefined_arg1;
+ Label arg2_is_object, check_undefined_arg2;
+ Label load_arg2, done;
+
+ __ JumpIfNotSmi(rdx, &arg1_is_object);
+ __ SmiToInteger32(r8, rdx);
+ __ jmp(&load_arg2);
+
+ // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg1);
+ __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, conversion_failure);
+ __ Set(r8, 0);
+ __ jmp(&load_arg2);
+
+ __ bind(&arg1_is_object);
+ __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
+ __ j(not_equal, &check_undefined_arg1);
+ // Get the untagged integer version of the rdx heap number in r8.
+ __ TruncateHeapNumberToI(r8, rdx);
+
+ // Here r8 has the untagged integer, rax has a Smi or a heap number.
+ __ bind(&load_arg2);
+ // Test if arg2 is a Smi.
+ __ JumpIfNotSmi(rax, &arg2_is_object);
+ __ SmiToInteger32(rcx, rax);
+ __ jmp(&done);
+
+ // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg2);
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, conversion_failure);
+ __ Set(rcx, 0);
+ __ jmp(&done);
+
+ __ bind(&arg2_is_object);
+ __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
+ __ j(not_equal, &check_undefined_arg2);
+ // Get the untagged integer version of the rax heap number in rcx.
+ __ TruncateHeapNumberToI(rcx, rax);
+
+ __ bind(&done);
+ __ movl(rax, r8);
+}
+
+
+void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
+ __ SmiToInteger32(kScratchRegister, rdx);
+ __ Cvtlsi2sd(xmm0, kScratchRegister);
+ __ SmiToInteger32(kScratchRegister, rax);
+ __ Cvtlsi2sd(xmm1, kScratchRegister);
+}
+
+
void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
Label* not_numbers) {
Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
}
+void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* on_success,
+ Label* on_not_smis,
+ ConvertUndefined convert_undefined) {
+ Register heap_number_map = scratch3;
+ Register smi_result = scratch1;
+ Label done, maybe_undefined_first, maybe_undefined_second, first_done;
+
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ Label first_smi;
+ __ JumpIfSmi(first, &first_smi, Label::kNear);
+ __ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
+ __ j(not_equal,
+ (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
+ ? &maybe_undefined_first
+ : on_not_smis);
+ // Convert HeapNumber to smi if possible.
+ __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
+ __ movq(scratch2, xmm0);
+ __ cvttsd2siq(smi_result, xmm0);
+ // Check if conversion was successful by converting back and
+ // comparing to the original double's bits.
+ __ Cvtlsi2sd(xmm1, smi_result);
+ __ movq(kScratchRegister, xmm1);
+ __ cmpq(scratch2, kScratchRegister);
+ __ j(not_equal, on_not_smis);
+ __ Integer32ToSmi(first, smi_result);
+
+ __ bind(&first_done);
+ __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
+ __ bind(&first_smi);
+ __ AssertNotSmi(second);
+ __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
+ __ j(not_equal,
+ (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
+ ? &maybe_undefined_second
+ : on_not_smis);
+ // Convert second to smi, if possible.
+ __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
+ __ movq(scratch2, xmm0);
+ __ cvttsd2siq(smi_result, xmm0);
+ __ Cvtlsi2sd(xmm1, smi_result);
+ __ movq(kScratchRegister, xmm1);
+ __ cmpq(scratch2, kScratchRegister);
+ __ j(not_equal, on_not_smis);
+ __ Integer32ToSmi(second, smi_result);
+ if (on_success != NULL) {
+ __ jmp(on_success);
+ } else {
+ __ jmp(&done);
+ }
+
+ __ bind(&maybe_undefined_first);
+ __ CompareRoot(first, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, on_not_smis);
+ __ xor_(first, first);
+ __ jmp(&first_done);
+
+ __ bind(&maybe_undefined_second);
+ __ CompareRoot(second, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, on_not_smis);
+ __ xor_(second, second);
+ if (on_success != NULL) {
+ __ jmp(on_success);
+ }
+ // Else: fall through.
+
+ __ bind(&done);
+}
+
+
void MathPowStub::Generate(MacroAssembler* masm) {
const Register exponent = rdx;
const Register base = rax;
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
- BinaryOpStub::GenerateAheadOfTime(isolate);
}