static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cond,
- bool never_nan_nan);
+ Condition cond);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register lhs,
Register rhs,
}
-void FloatingPointHelper::LoadOperands(
- MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* slow) {
-
- // Load right operand (r0) to d6 or r2/r3.
- LoadNumber(masm, destination,
- r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow);
-
- // Load left operand (r1) to d7 or r0/r1.
- LoadNumber(masm, destination,
- r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow);
-}
-
-
void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
Destination destination,
Register object,
!scratch1.is(scratch3) &&
!scratch2.is(scratch3));
- Label done;
+ Label done, maybe_undefined;
__ UntagAndJumpIfSmi(dst, object, &done);
__ AssertRootValue(heap_number_map,
Heap::kHeapNumberMapRootIndex,
"HeapNumberMap register clobbered.");
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined);
// Object is a heap number.
// Convert the floating point value to a 32-bit integer.
__ tst(scratch1, Operand(HeapNumber::kSignMask));
__ rsb(dst, dst, Operand::Zero(), LeaveCC, mi);
}
+ __ b(&done);
+
+ __ bind(&maybe_undefined);
+ __ CompareRoot(object, Heap::kUndefinedValueRootIndex);
+ __ b(ne, not_int32);
+ // |undefined| is truncated to 0.
+ __ mov(dst, Operand(Smi::FromInt(0)));
+ // Fall through.
__ bind(&done);
}
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cond,
- bool never_nan_nan) {
+ Condition cond) {
Label not_identical;
Label heap_number, return_equal;
__ cmp(r0, r1);
__ b(ne, &not_identical);
- // The two objects are identical. If we know that one of them isn't NaN then
- // we now know they test equal.
- if (cond != eq || !never_nan_nan) {
- // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
- // so we do the second best thing - test it ourselves.
- // They are both equal and they are not both Smis so both of them are not
- // Smis. If it's not a heap number, then return equal.
- if (cond == lt || cond == gt) {
- __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+ // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // They are both equal and they are not both Smis so both of them are not
+ // Smis. If it's not a heap number, then return equal.
+ if (cond == lt || cond == gt) {
+ __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+ __ b(ge, slow);
+ } else {
+ __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+ __ b(eq, &heap_number);
+ // Comparing JS objects with <=, >= is complicated.
+ if (cond != eq) {
+ __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
__ b(ge, slow);
- } else {
- __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
- __ b(eq, &heap_number);
- // Comparing JS objects with <=, >= is complicated.
- if (cond != eq) {
- __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ b(ge, slow);
- // Normally here we fall through to return_equal, but undefined is
- // special: (undefined == undefined) == true, but
- // (undefined <= undefined) == false! See ECMAScript 11.8.5.
- if (cond == le || cond == ge) {
- __ cmp(r4, Operand(ODDBALL_TYPE));
- __ b(ne, &return_equal);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, r2);
- __ b(ne, &return_equal);
- if (cond == le) {
- // undefined <= undefined should fail.
- __ mov(r0, Operand(GREATER));
- } else {
- // undefined >= undefined should fail.
- __ mov(r0, Operand(LESS));
- }
- __ Ret();
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if (cond == le || cond == ge) {
+ __ cmp(r4, Operand(ODDBALL_TYPE));
+ __ b(ne, &return_equal);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, r2);
+ __ b(ne, &return_equal);
+ if (cond == le) {
+ // undefined <= undefined should fail.
+ __ mov(r0, Operand(GREATER));
+ } else {
+ // undefined >= undefined should fail.
+ __ mov(r0, Operand(LESS));
}
+ __ Ret();
}
}
}
}
__ Ret();
- if (cond != eq || !never_nan_nan) {
- // For less and greater we don't have to check for NaN since the result of
- // x < x is false regardless. For the others here is some code to check
- // for NaN.
- if (cond != lt && cond != gt) {
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if it's
- // not NaN.
-
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // Read top bits of double representation (second word of value).
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- // Test that exponent bits are all set.
- __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
- // NaNs have all-one exponents so they sign extend to -1.
- __ cmp(r3, Operand(-1));
- __ b(ne, &return_equal);
-
- // Shift out flag and all exponent bits, retaining only mantissa.
- __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
- // Or with all low-bits of mantissa.
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ orr(r0, r3, Operand(r2), SetCC);
- // For equal we already have the right value in r0: Return zero (equal)
- // if all bits in mantissa are zero (it's an Infinity) and non-zero if
- // not (it's a NaN). For <= and >= we need to load r0 with the failing
- // value if it's a NaN.
- if (cond != eq) {
- // All-zero means Infinity means equal.
- __ Ret(eq);
- if (cond == le) {
- __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
- } else {
- __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
- }
+ // For less and greater we don't have to check for NaN since the result of
+ // x < x is false regardless. For the others here is some code to check
+ // for NaN.
+ if (cond != lt && cond != gt) {
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if it's
+ // not NaN.
+
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
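+ // For example, the quiet NaN 0x7FF8000000000000 and +Infinity
+ // 0x7FF0000000000000 share the all-ones exponent 0x7FF; only the non-zero
+ // mantissa distinguishes the NaN.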
+ // Read top bits of double representation (second word of value).
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ // Test that exponent bits are all set.
+ __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ // NaNs have all-one exponents so they sign extend to -1.
+ __ cmp(r3, Operand(-1));
+ __ b(ne, &return_equal);
+
+ // Shift out flag and all exponent bits, retaining only mantissa.
+ __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
+ // Or with all low-bits of mantissa.
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ orr(r0, r3, Operand(r2), SetCC);
+ // For equal we already have the right value in r0: Return zero (equal)
+ // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+ // not (it's a NaN). For <= and >= we need to load r0 with the failing
+ // value if it's a NaN.
+ if (cond != eq) {
+ // All-zero means Infinity means equal.
+ __ Ret(eq);
+ if (cond == le) {
+ __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
+ } else {
+ __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
}
- __ Ret();
}
- // No fall through here.
+ __ Ret();
}
+ // No fall through here.
__ bind(&not_identical);
}
}
-// On entry lhs_ and rhs_ are the values to be compared.
+static void ICCompareStub_CheckInputType(MacroAssembler* masm,
+ Register input,
+ Register scratch,
+ CompareIC::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareIC::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareIC::HEAP_NUMBER) {
+ __ JumpIfSmi(input, &ok);
+ __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
+ DONT_DO_SMI_CHECK);
+ }
+ // We could be strict about symbol/string here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ bind(&ok);
+}
+
+
+// On entry r1 and r0 are the values to be compared.
// On exit r0 is 0, positive or negative to indicate the result of
// the comparison.
-void CompareStub::Generate(MacroAssembler* masm) {
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
+void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+ Register lhs = r1;
+ Register rhs = r0;
+ Condition cc = GetCondition();
+
+ Label miss;
+ ICCompareStub_CheckInputType(masm, lhs, r2, left_, &miss);
+ ICCompareStub_CheckInputType(masm, rhs, r3, right_, &miss);
Label slow; // Call builtin.
Label not_smis, both_loaded_as_doubles, lhs_not_nan;
- if (include_smi_compare_) {
- Label not_two_smis, smi_done;
- __ orr(r2, r1, r0);
- __ JumpIfNotSmi(r2, &not_two_smis);
- __ mov(r1, Operand(r1, ASR, 1));
- __ sub(r0, r1, Operand(r0, ASR, 1));
- __ Ret();
- __ bind(&not_two_smis);
- } else if (FLAG_debug_code) {
- __ orr(r2, r1, r0);
- __ tst(r2, Operand(kSmiTagMask));
- __ Assert(ne, "CompareStub: unexpected smi operands.");
- }
+ Label not_two_smis, smi_done;
+ __ orr(r2, r1, r0);
+ __ JumpIfNotSmi(r2, &not_two_smis);
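+ // Both operands are smis: untag them with an arithmetic shift right by one
+ // and subtract. The sign of r0 (negative, zero or positive) is the result
+ // expected by callers.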
+ __ mov(r1, Operand(r1, ASR, 1));
+ __ sub(r0, r1, Operand(r0, ASR, 1));
+ __ Ret();
+ __ bind(&not_two_smis);
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
+ EmitIdenticalObjectComparison(masm, &slow, cc);
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
- __ and_(r2, lhs_, Operand(rhs_));
+ __ and_(r2, lhs, Operand(rhs));
__ JumpIfNotSmi(r2, &not_smis);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
// 1) Return the answer.
// comparison. If VFP3 is supported the double values of the numbers have
// been loaded into d7 and d6. Otherwise, the double values have been loaded
// into r0, r1, r2, and r3.
- EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_);
+ EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
__ bind(&both_loaded_as_doubles);
// The arguments have been converted to doubles and stored in d6 and d7, if
// If one of the sides was a NaN then the v flag is set. Load r0 with
// whatever it takes to make the comparison fail, since comparisons with NaN
// always fail.
- if (cc_ == lt || cc_ == le) {
+ if (cc == lt || cc == le) {
__ mov(r0, Operand(GREATER));
} else {
__ mov(r0, Operand(LESS));
} else {
// Checks for NaN in the doubles we have loaded. Can return the answer or
// fall through if neither is a NaN. Also binds lhs_not_nan.
- EmitNanCheck(masm, &lhs_not_nan, cc_);
+ EmitNanCheck(masm, &lhs_not_nan, cc);
// Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
// answer. Never falls through.
- EmitTwoNonNanDoubleComparison(masm, cc_);
+ EmitTwoNonNanDoubleComparison(masm, cc);
}
__ bind(&not_smis);
// At this point we know we are dealing with two different objects,
// and neither of them is a Smi. The objects are in rhs_ and lhs_.
- if (strict_) {
+ if (strict()) {
// This returns non-equal for some object types, or falls through if it
// was not lucky.
- EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
+ EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
}
Label check_for_symbols;
// that case. If the inputs are not doubles then jumps to check_for_symbols.
// In this case r2 will contain the type of rhs_. Never falls through.
EmitCheckForTwoHeapNumbers(masm,
- lhs_,
- rhs_,
+ lhs,
+ rhs,
&both_loaded_as_doubles,
&check_for_symbols,
&flat_string_check);
__ bind(&check_for_symbols);
// In the strict case the EmitStrictTwoHeapObjectCompare already took care of
// symbols.
- if (cc_ == eq && !strict_) {
+ if (cc == eq && !strict()) {
// Returns an answer for two symbols or two detectable objects.
// Otherwise jumps to string case or not both strings case.
// Assumes that r2 is the type of rhs_ on entry.
- EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
+ EmitCheckForSymbolsOrObjects(masm, lhs, rhs, &flat_string_check, &slow);
}
// Check for both being sequential ASCII strings, and inline if that is the
// case.
__ bind(&flat_string_check);
- __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow);
+ __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, r2, r3, &slow);
__ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3);
- if (cc_ == eq) {
+ if (cc == eq) {
StringCompareStub::GenerateFlatAsciiStringEquals(masm,
- lhs_,
- rhs_,
+ lhs,
+ rhs,
r2,
r3,
r4);
} else {
StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- lhs_,
- rhs_,
+ lhs,
+ rhs,
r2,
r3,
r4,
__ bind(&slow);
- __ Push(lhs_, rhs_);
+ __ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
Builtins::JavaScript native;
- if (cc_ == eq) {
- native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc == eq) {
+ native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
native = Builtins::COMPARE;
int ncr; // NaN compare result
- if (cc_ == lt || cc_ == le) {
+ if (cc == lt || cc == le) {
ncr = GREATER;
} else {
- ASSERT(cc_ == gt || cc_ == ge); // remaining cases
+ ASSERT(cc == gt || cc == ge); // remaining cases
ncr = LESS;
}
__ mov(r0, Operand(Smi::FromInt(ncr)));
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ InvokeBuiltin(native, JUMP_FUNCTION);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
}
}
+void BinaryOpStub::Initialize() {
+ platform_specific_bit_ = CpuFeatures::IsSupported(VFP2);
+}
+
+
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
Label get_result;
__ Push(r1, r0);
__ mov(r2, Operand(Smi::FromInt(MinorKey())));
- __ mov(r1, Operand(Smi::FromInt(op_)));
- __ mov(r0, Operand(Smi::FromInt(operands_type_)));
- __ Push(r2, r1, r0);
+ __ push(r2);
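+ // The operation and operand types are encoded in the minor key (r2), so
+ // only left, right and the key are passed to the runtime patch function
+ // instead of the previous five arguments.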
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
masm->isolate()),
- 5,
+ 3,
1);
}
}
-void BinaryOpStub::Generate(MacroAssembler* masm) {
- // Explicitly allow generation of nested stubs. It is safe here because
- // generation code does not use any raw pointers.
- AllowStubCallsScope allow_stub_calls(masm, true);
-
- switch (operands_type_) {
- case BinaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case BinaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case BinaryOpIC::INT32:
- GenerateInt32Stub(masm);
- break;
- case BinaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
- break;
- case BinaryOpIC::ODDBALL:
- GenerateOddballStub(masm);
- break;
- case BinaryOpIC::BOTH_STRING:
- GenerateBothStringStub(masm);
- break;
- case BinaryOpIC::STRING:
- GenerateStringStub(masm);
- break;
- case BinaryOpIC::GENERIC:
- GenerateGeneric(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
- stream->Add("BinaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(operands_type_));
-}
-
-
-void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
+void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
+ Token::Value op) {
Register left = r1;
Register right = r0;
Register scratch1 = r7;
STATIC_ASSERT(kSmiTag == 0);
Label not_smi_result;
- switch (op_) {
+ switch (op) {
case Token::ADD:
__ add(right, left, Operand(right), SetCC); // Add optimistically.
__ Ret(vc);
}
-void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required) {
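+// Forward declaration; the full definition of this helper follows further
+// down in this file.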
+void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ OverwriteMode mode);
+
+
+void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
+ BinaryOpIC::TypeInfo left_type,
+ BinaryOpIC::TypeInfo right_type,
+ bool smi_operands,
+ Label* not_numbers,
+ Label* gc_required,
+ Label* miss,
+ Token::Value op,
+ OverwriteMode mode) {
Register left = r1;
Register right = r0;
Register scratch1 = r7;
__ AssertSmi(left);
__ AssertSmi(right);
}
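+ // If type feedback has only ever seen smis for an operand, jump to |miss|
+ // as soon as a non-smi shows up so the IC can be repatched with wider
+ // type information.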
+ if (left_type == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(left, miss);
+ }
+ if (right_type == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(right, miss);
+ }
Register heap_number_map = r6;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- switch (op_) {
+ switch (op) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
// depending on whether VFP3 is available or not.
FloatingPointHelper::Destination destination =
CpuFeatures::IsSupported(VFP2) &&
- op_ != Token::MOD ?
+ op != Token::MOD ?
FloatingPointHelper::kVFPRegisters :
FloatingPointHelper::kCoreRegisters;
// Allocate new heap number for result.
Register result = r5;
- GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required);
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
// Load the operands.
if (smi_operands) {
FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
} else {
- FloatingPointHelper::LoadOperands(masm,
- destination,
- heap_number_map,
- scratch1,
- scratch2,
- not_numbers);
+ // Load right operand to d7 or r2/r3.
+ if (right_type == BinaryOpIC::INT32) {
+ FloatingPointHelper::LoadNumberAsInt32Double(
+ masm, right, destination, d7, d8, r2, r3, heap_number_map,
+ scratch1, scratch2, s0, miss);
+ } else {
+ Label* fail = (right_type == BinaryOpIC::HEAP_NUMBER) ? miss
+ : not_numbers;
+ FloatingPointHelper::LoadNumber(
+ masm, destination, right, d7, r2, r3, heap_number_map,
+ scratch1, scratch2, fail);
+ }
+ // Load left operand to d6 or r0/r1. This keeps r0/r1 intact if it
+ // jumps to |miss|.
+ if (left_type == BinaryOpIC::INT32) {
+ FloatingPointHelper::LoadNumberAsInt32Double(
+ masm, left, destination, d6, d8, r0, r1, heap_number_map,
+ scratch1, scratch2, s0, miss);
+ } else {
+ Label* fail = (left_type == BinaryOpIC::HEAP_NUMBER) ? miss
+ : not_numbers;
+ FloatingPointHelper::LoadNumber(
+ masm, destination, left, d6, r0, r1, heap_number_map,
+ scratch1, scratch2, fail);
+ }
}
// Calculate the result.
// d6: Left value
// d7: Right value
CpuFeatures::Scope scope(VFP2);
- switch (op_) {
+ switch (op) {
case Token::ADD:
__ vadd(d5, d6, d7);
break;
} else {
// Call the C function to handle the double operation.
FloatingPointHelper::CallCCodeForDoubleOperation(masm,
- op_,
+ op,
result,
scratch1);
if (FLAG_debug_code) {
}
Label result_not_a_smi;
- switch (op_) {
+ switch (op) {
case Token::BIT_OR:
__ orr(r2, r3, Operand(r2));
break;
__ AllocateHeapNumber(
result, scratch1, scratch2, heap_number_map, gc_required);
} else {
- GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required);
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required,
+ mode);
}
// r2: Answer as signed int32.
// mentioned above SHR needs to always produce a positive result.
CpuFeatures::Scope scope(VFP2);
__ vmov(s0, r2);
- if (op_ == Token::SHR) {
+ if (op == Token::SHR) {
__ vcvt_f64_u32(d0, s0);
} else {
__ vcvt_f64_s32(d0, s0);
// Generate the smi code. If the operation on smis are successful this return is
// generated. If the result is not a smi and heap number allocation is not
// requested the code falls through. If number allocation is requested but a
-// heap number cannot be allocated the code jumps to the lable gc_required.
-void BinaryOpStub::GenerateSmiCode(
+// heap number cannot be allocated the code jumps to the label gc_required.
+void BinaryOpStub_GenerateSmiCode(
MacroAssembler* masm,
Label* use_runtime,
Label* gc_required,
- SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
+ Token::Value op,
+ BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
+ OverwriteMode mode) {
Label not_smis;
Register left = r1;
__ JumpIfNotSmi(scratch1, &not_smis);
// If the smi-smi operation results in a smi return is generated.
- GenerateSmiSmiOperation(masm);
+ BinaryOpStub_GenerateSmiSmiOperation(masm, op);
// If heap number results are possible generate the result in an allocated
// heap number.
- if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
- GenerateFPOperation(masm, true, use_runtime, gc_required);
+ if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
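+ // Both operands are known to be smis here, so UNINITIALIZED is passed for
+ // the operand types and the per-type guards in the FP helper are skipped.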
+ BinaryOpStub_GenerateFPOperation(
+ masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
+ use_runtime, gc_required, &not_smis, op, mode);
}
__ bind(&not_smis);
}
if (result_type_ == BinaryOpIC::UNINITIALIZED ||
result_type_ == BinaryOpIC::SMI) {
// Only allow smi results.
- GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_);
} else {
// Allow heap number result and don't make a transition if a heap number
// cannot be allocated.
- GenerateSmiCode(masm,
- &call_runtime,
- &call_runtime,
- ALLOW_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS,
+ mode_);
}
// Code falls through if the result is not returned as either a smi or heap
GenerateTypeTransition(masm);
__ bind(&call_runtime);
+ GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
-void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(operands_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // Try to add arguments as strings, otherwise, transition to the generic
- // BinaryOpIC type.
- GenerateAddStrings(masm);
- GenerateTypeTransition(masm);
-}
-
-
void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
Label call_runtime;
- ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+ ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
ASSERT(op_ == Token::ADD);
// If both arguments are strings, call the string add stub.
// Otherwise, do a transition.
void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- ASSERT(operands_type_ == BinaryOpIC::INT32);
+ ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
Register left = r1;
Register right = r0;
Label skip;
__ orr(scratch1, left, right);
__ JumpIfNotSmi(scratch1, &skip);
- GenerateSmiSmiOperation(masm);
+ BinaryOpStub_GenerateSmiSmiOperation(masm, op_);
// Fall through if the result is not a smi.
__ bind(&skip);
case Token::MUL:
case Token::DIV:
case Token::MOD: {
+ // It could be that only SMIs have been seen at either the left
+ // or the right operand. For precise type feedback, patch the IC
+ // again if this changes.
+ if (left_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(left, &transition);
+ }
+ if (right_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(right, &transition);
+ }
// Load both operands and check that they are 32-bit integer.
// Jump to type transition if they are not. The registers r0 and r1 (right
// and left) are preserved for the runtime call.
: BinaryOpIC::INT32)) {
// We are using vfp registers so r5 is available.
heap_number_result = r5;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime,
+ mode_);
__ sub(r0, heap_number_result, Operand(kHeapObjectTag));
__ vstr(d5, r0, HeapNumber::kValueOffset);
__ mov(r0, heap_number_result);
// Allocate a heap number to store the result.
heap_number_result = r5;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &pop_and_call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &pop_and_call_runtime,
+ mode_);
// Load the left value from the value saved on the stack.
__ Pop(r1, r0);
__ bind(&return_heap_number);
heap_number_result = r5;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime,
+ mode_);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
}
__ bind(&call_runtime);
+ GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
- Label call_runtime;
- GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
+ Label call_runtime, transition;
+ BinaryOpStub_GenerateFPOperation(
+ masm, left_type_, right_type_, false,
+ &transition, &call_runtime, &transition, op_, mode_);
+
+ __ bind(&transition);
+ GenerateTypeTransition(masm);
__ bind(&call_runtime);
+ GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime, call_string_add_or_runtime;
+ Label call_runtime, call_string_add_or_runtime, transition;
- GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_);
- GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
+ BinaryOpStub_GenerateFPOperation(
+ masm, left_type_, right_type_, false,
+ &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_);
+
+ __ bind(&transition);
+ GenerateTypeTransition(masm);
__ bind(&call_string_add_or_runtime);
if (op_ == Token::ADD) {
}
__ bind(&call_runtime);
+ GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
}
-void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
- GenerateRegisterArgsPush(masm);
- switch (op_) {
- case Token::ADD:
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
+void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ OverwriteMode mode) {
// Code below will scratch result if allocation fails. To keep both arguments
// intact for the runtime call result cannot be one of these.
ASSERT(!result.is(r0) && !result.is(r1));
- if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
+ if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
Label skip_allocation, allocated;
- Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0;
+ Register overwritable_operand = mode == OVERWRITE_LEFT ? r1 : r0;
// If the overwritable operand is already an object, we skip the
// allocation of a heap number.
__ JumpIfNotSmi(overwritable_operand, &skip_allocation);
__ mov(result, Operand(overwritable_operand));
__ bind(&allocated);
} else {
- ASSERT(mode_ == NO_OVERWRITE);
+ ASSERT(mode == NO_OVERWRITE);
__ AllocateHeapNumber(
result, scratch1, scratch2, heap_number_map, gc_required);
}
}
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-void CompareStub::PrintName(StringStream* stream) {
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
- const char* cc_name;
- switch (cc_) {
- case lt: cc_name = "LT"; break;
- case gt: cc_name = "GT"; break;
- case le: cc_name = "LE"; break;
- case ge: cc_name = "GE"; break;
- case eq: cc_name = "EQ"; break;
- case ne: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
- bool is_equality = cc_ == eq || cc_ == ne;
- stream->Add("CompareStub_%s", cc_name);
- stream->Add(lhs_.is(r0) ? "_r0" : "_r1");
- stream->Add(rhs_.is(r0) ? "_r0" : "_r1");
- if (strict_ && is_equality) stream->Add("_STRICT");
- if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
- if (!include_number_compare_) stream->Add("_NO_NUMBER");
- if (!include_smi_compare_) stream->Add("_NO_SMI");
-}
-
-
-int CompareStub::MinorKey() {
- // Encode the three parameters in a unique 16 bit value. To avoid duplicate
- // stubs the never NaN NaN condition is only taken into account if the
- // condition is equals.
- ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12));
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
- return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
- | RegisterField::encode(lhs_.is(r0))
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
- | IncludeNumberCompareField::encode(include_number_compare_)
- | IncludeSmiCompareField::encode(include_smi_compare_);
-}
-
-
// StringCharCodeAtGenerator
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
Label flat_string;
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMIS);
+ ASSERT(state_ == CompareIC::SMI);
Label miss;
__ orr(r2, r1, r0);
__ JumpIfNotSmi(r2, &miss);
void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+ ASSERT(state_ == CompareIC::HEAP_NUMBER);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
- __ and_(r2, r1, Operand(r0));
- __ JumpIfSmi(r2, &generic_stub);
- __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
- __ b(ne, &maybe_undefined1);
- __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
- __ b(ne, &maybe_undefined2);
+ if (left_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(r1, &miss);
+ }
+ if (right_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(r0, &miss);
+ }
// Inlining the double comparison and falling back to the general compare
- // stub if NaN is involved or VFP3 is unsupported.
+ // stub if NaN is involved or VFP2 is unsupported.
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
- // Load left and right operand
- __ sub(r2, r1, Operand(kHeapObjectTag));
- __ vldr(d0, r2, HeapNumber::kValueOffset);
+ // Load left and right operand.
+ Label done, left, left_smi, right_smi;
+ __ JumpIfSmi(r0, &right_smi);
+ __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
+ DONT_DO_SMI_CHECK);
__ sub(r2, r0, Operand(kHeapObjectTag));
__ vldr(d1, r2, HeapNumber::kValueOffset);
+ __ b(&left);
+ __ bind(&right_smi);
+ __ SmiUntag(r2, r0); // Can't clobber r0 yet.
+ SwVfpRegister single_scratch = d2.low();
+ __ vmov(single_scratch, r2);
+ __ vcvt_f64_s32(d1, single_scratch);
+
+ __ bind(&left);
+ __ JumpIfSmi(r1, &left_smi);
+ __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
+ DONT_DO_SMI_CHECK);
+ __ sub(r2, r1, Operand(kHeapObjectTag));
+ __ vldr(d0, r2, HeapNumber::kValueOffset);
+ __ b(&done);
+ __ bind(&left_smi);
+ __ SmiUntag(r2, r1); // Can't clobber r1 yet.
+ single_scratch = d3.low();
+ __ vmov(single_scratch, r2);
+ __ vcvt_f64_s32(d0, single_scratch);
- // Compare operands
+ __ bind(&done);
+ // Compare operands.
__ VFPCompareAndSetFlags(d0, d1);
// Don't base result on status bits when a NaN is involved.
}
__ bind(&unordered);
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
__ bind(&generic_stub);
+ ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ CompareIC::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
__ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(ne, &miss);
+ __ JumpIfSmi(r1, &unordered);
__ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
__ b(ne, &maybe_undefined2);
__ jmp(&unordered);
void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SYMBOLS);
+ ASSERT(state_ == CompareIC::SYMBOL);
Label miss;
// Registers containing left and right operands respectively.
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRINGS);
+ ASSERT(state_ == CompareIC::STRING);
Label miss;
bool equality = Token::IsEqualityOp(op_);
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECTS);
+ ASSERT(state_ == CompareIC::OBJECT);
Label miss;
__ and_(r2, r1, Operand(r0));
__ JumpIfSmi(r2, &miss);
};
-class BinaryOpStub: public CodeStub {
- public:
- BinaryOpStub(Token::Value op, OverwriteMode mode)
- : op_(op),
- mode_(mode),
- operands_type_(BinaryOpIC::UNINITIALIZED),
- result_type_(BinaryOpIC::UNINITIALIZED) {
- use_vfp2_ = CpuFeatures::IsSupported(VFP2);
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- BinaryOpStub(
- int key,
- BinaryOpIC::TypeInfo operands_type,
- BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- use_vfp2_(VFP2Bits::decode(key)),
- operands_type_(operands_type),
- result_type_(result_type) { }
-
- private:
- enum SmiCodeGenerateHeapNumberResults {
- ALLOW_HEAPNUMBER_RESULTS,
- NO_HEAPNUMBER_RESULTS
- };
-
- Token::Value op_;
- OverwriteMode mode_;
- bool use_vfp2_;
-
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo operands_type_;
- BinaryOpIC::TypeInfo result_type_;
-
- virtual void PrintName(StringStream* stream);
-
- // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class VFP2Bits: public BitField<bool, 9, 1> {};
- class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
- class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
-
- Major MajorKey() { return BinaryOp; }
- int MinorKey() {
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | VFP2Bits::encode(use_vfp2_)
- | OperandTypeInfoBits::encode(operands_type_)
- | ResultTypeInfoBits::encode(result_type_);
- }
-
- void Generate(MacroAssembler* masm);
- void GenerateGeneric(MacroAssembler* masm);
- void GenerateSmiSmiOperation(MacroAssembler* masm);
- void GenerateFPOperation(MacroAssembler* masm,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required);
- void GenerateSmiCode(MacroAssembler* masm,
- Label* use_runtime,
- Label* gc_required,
- SmiCodeGenerateHeapNumberResults heapnumber_results);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateUninitializedStub(MacroAssembler* masm);
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateInt32Stub(MacroAssembler* masm);
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateOddballStub(MacroAssembler* masm);
- void GenerateStringStub(MacroAssembler* masm);
- void GenerateBothStringStub(MacroAssembler* masm);
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateAddStrings(MacroAssembler* masm);
- void GenerateCallRuntime(MacroAssembler* masm);
-
- void GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
- void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(operands_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_binary_op_type(operands_type_);
- code->set_binary_op_result_type(result_type_);
- }
-
- friend class CodeGenerator;
-};
-
-
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
Register scratch1,
Register scratch2);
- // Loads objects from r0 and r1 (right and left in binary operations) into
- // floating point registers. Depending on the destination the values ends up
- // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
- // floating point registers VFP3 must be supported. If core registers are
- // requested when VFP3 is supported d6 and d7 will still be scratched. If
- // either r0 or r1 is not a number (not smi and not heap number object) the
- // not_number label is jumped to with r0 and r1 intact.
- static void LoadOperands(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number);
-
// Convert the smi or heap number in object to an int32 using the rules
// for ToInt32 as described in ECMAScript 9.5.: the value is truncated
// and brought into the range -2^31 .. +2^31 - 1.
Register heap_number_result,
Register scratch);
- private:
+ // Loads the number in |object| into floating point registers.
+ // Depending on |destination| the value ends up either in |dst| or
+ // in |dst1|/|dst2|. If |destination| is kVFPRegisters, then VFP2
+ // must be supported. If kCoreRegisters are requested and VFP2 is
+ // supported, |dst| will be scratched. If |object| is neither smi nor
+ // heap number, |not_number| is jumped to with |object| still intact.
static void LoadNumber(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
Register object,
// Call stub. Undo operation first.
__ sub(r0, r0, Operand(Smi::FromInt(count_value)));
}
- __ mov(r1, Operand(Smi::FromInt(count_value)));
+ __ mov(r1, r0);
+ __ mov(r0, Operand(Smi::FromInt(count_value)));
// Record position before stub call.
SetSourcePosition(expr->position());
default: {
VisitForAccumulatorValue(expr->right());
- Condition cond = eq;
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- cond = eq;
- break;
- case Token::LT:
- cond = lt;
- break;
- case Token::GT:
- cond = gt;
- break;
- case Token::LTE:
- cond = le;
- break;
- case Token::GTE:
- cond = ge;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
+ Condition cond = CompareIC::ComputeCondition(op);
__ pop(r1);
bool inline_smi_code = ShouldInlineSmiCase(op);
}
-void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
- HandleScope scope;
- Handle<Code> rewritten;
- State previous_state = GetState();
- State state = TargetState(previous_state, false, x, y);
- if (state == GENERIC) {
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
- rewritten = stub.GetCode();
- } else {
- ICCompareStub stub(op_, state);
- if (state == KNOWN_OBJECTS) {
- stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
- }
- rewritten = stub.GetCode();
- }
- set_target(*rewritten);
-
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[CompareIC (%s->%s)#%s]\n",
- GetStateName(previous_state),
- GetStateName(state),
- Token::Name(op_));
- }
-#endif
+bool CompareIC::HasInlinedSmiCode(Address address) {
+ // The address of the instruction following the call.
+ Address cmp_instruction_address =
+ Assembler::return_address_from_call_start(address);
- // Activate inlined smi code.
- if (previous_state == UNINITIALIZED) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
- }
+ // If the instruction following the call is not a cmp rx, #yyy, nothing
+ // was inlined.
+ Instr instr = Assembler::instr_at(cmp_instruction_address);
+ return Assembler::IsCmpImmediate(instr);
}
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
HCompareIDAndBranch* instr) {
- Representation r = instr->GetInputRepresentation();
+ Representation r = instr->representation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
env->set_ast_id(instr->ast_id());
env->Drop(instr->pop_count());
- for (int i = 0; i < instr->values()->length(); ++i) {
+ for (int i = instr->values()->length() - 1; i >= 0; --i) {
HValue* value = instr->values()->at(i);
if (instr->HasAssignedIndexAt(i)) {
env->Bind(instr->GetAssignedIndexAt(i), value);
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
+ return hydrogen()->representation().IsDouble();
}
virtual void PrintDataTo(StringStream* stream);
}
if (emit_instructions) {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+ if (FLAG_code_comments) {
+ HValue* hydrogen = instr->hydrogen_value();
+ if (hydrogen != NULL) {
+ if (hydrogen->IsChange()) {
+ HValue* changed_value = HChange::cast(hydrogen)->value();
+ int use_id = 0;
+ const char* use_mnemo = "dead";
+ if (hydrogen->UseCount() >= 1) {
+ HValue* use_value = hydrogen->uses().value();
+ use_id = use_value->id();
+ use_mnemo = use_value->Mnemonic();
+ }
+ Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
+ current_instruction_, instr->Mnemonic(),
+ changed_value->id(), changed_value->Mnemonic(),
+ use_id, use_mnemo);
+ } else {
+ Comment(";;; @%d: %s. <#%d>", current_instruction_,
+ instr->Mnemonic(), hydrogen->id());
+ }
+ } else {
+ Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+ }
+ }
instr->CompileToNative(this);
}
}
void CaseClause::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
TypeInfo info = oracle->SwitchType(this);
+ if (info.IsUninitialized()) info = TypeInfo::Unknown();
if (info.IsSmi()) {
compare_type_ = SMI_ONLY;
} else if (info.IsSymbol()) {
}
-void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- TypeInfo info = oracle->CompareType(this);
- if (info.IsSmi()) {
- compare_type_ = SMI_ONLY;
- } else if (info.IsNonPrimitive()) {
- compare_type_ = OBJECT_ONLY;
- } else {
- ASSERT(compare_type_ == NONE);
- }
-}
-
-
void ObjectLiteral::Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
receiver_type_ = oracle->ObjectLiteralStoreIsMonomorphic(this)
? oracle->GetObjectLiteralStoreMap(this)
// Type feedback information.
TypeFeedbackId CompareOperationFeedbackId() const { return reuse(id()); }
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
- bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
- bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
// Match special cases.
bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
op_(op),
left_(left),
right_(right),
- pos_(pos),
- compare_type_(NONE) {
+ pos_(pos) {
ASSERT(Token::IsCompareOp(op));
}
Expression* left_;
Expression* right_;
int pos_;
-
- enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY };
- CompareTypeFeedback compare_type_;
};
}
+void BinaryOpStub::Generate(MacroAssembler* masm) {
+ // Explicitly allow generation of nested stubs. It is safe here because
+ // generation code does not use any raw pointers.
+ AllowStubCallsScope allow_stub_calls(masm, true);
+
+ BinaryOpIC::TypeInfo operands_type = Max(left_type_, right_type_);
+ if (left_type_ == BinaryOpIC::ODDBALL && right_type_ == BinaryOpIC::ODDBALL) {
+ // The OddballStub handles a number and an oddball, not two oddballs.
+ operands_type = BinaryOpIC::GENERIC;
+ }
+ switch (operands_type) {
+ case BinaryOpIC::UNINITIALIZED:
+ GenerateTypeTransition(masm);
+ break;
+ case BinaryOpIC::SMI:
+ GenerateSmiStub(masm);
+ break;
+ case BinaryOpIC::INT32:
+ GenerateInt32Stub(masm);
+ break;
+ case BinaryOpIC::HEAP_NUMBER:
+ GenerateHeapNumberStub(masm);
+ break;
+ case BinaryOpIC::ODDBALL:
+ GenerateOddballStub(masm);
+ break;
+ case BinaryOpIC::STRING:
+ GenerateStringStub(masm);
+ break;
+ case BinaryOpIC::GENERIC:
+ GenerateGeneric(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
+ switch (op_) {
+ case Token::ADD:
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+ break;
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+ break;
+ case Token::MUL:
+ __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ break;
+ case Token::DIV:
+ __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+ break;
+ case Token::MOD:
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+ break;
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+ break;
+ case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+ break;
+ case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+ break;
+ case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+#undef __
+
+
+void BinaryOpStub::PrintName(StringStream* stream) {
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+ case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+ default: overwrite_name = "UnknownOverwrite"; break;
+ }
+ stream->Add("BinaryOpStub_%s_%s_%s+%s",
+ op_name,
+ overwrite_name,
+ BinaryOpIC::GetName(left_type_),
+ BinaryOpIC::GetName(right_type_));
+}
+
+
+void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+ ASSERT(left_type_ == BinaryOpIC::STRING || right_type_ == BinaryOpIC::STRING);
+ ASSERT(op_ == Token::ADD);
+ if (left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING) {
+ GenerateBothStringStub(masm);
+ return;
+ }
+ // Try to add arguments as strings, otherwise, transition to the generic
+ // BinaryOpIC type.
+ GenerateAddStrings(masm);
+ GenerateTypeTransition(masm);
+}
+
+
void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) {
ASSERT(*known_map_ != NULL);
Isolate* isolate = new_object->GetIsolate();
flags));
if (probe->IsCode()) {
*code_out = Code::cast(*probe);
- ASSERT(op_ == (*code_out)->compare_operation() + Token::EQ);
+#ifdef DEBUG
+ Token::Value cached_op;
+ ICCompareStub::DecodeMinorKey((*code_out)->stub_info(), NULL, NULL, NULL,
+ &cached_op);
+ ASSERT(op_ == cached_op);
+#endif
return true;
}
return false;
int ICCompareStub::MinorKey() {
- return OpField::encode(op_ - Token::EQ) | StateField::encode(state_);
+ return OpField::encode(op_ - Token::EQ) |
+ LeftStateField::encode(left_) |
+ RightStateField::encode(right_) |
+ HandlerStateField::encode(state_);
+}
+
+
+void ICCompareStub::DecodeMinorKey(int minor_key,
+ CompareIC::State* left_state,
+ CompareIC::State* right_state,
+ CompareIC::State* handler_state,
+ Token::Value* op) {
+ if (left_state) {
+ *left_state =
+ static_cast<CompareIC::State>(LeftStateField::decode(minor_key));
+ }
+ if (right_state) {
+ *right_state =
+ static_cast<CompareIC::State>(RightStateField::decode(minor_key));
+ }
+ if (handler_state) {
+ *handler_state =
+ static_cast<CompareIC::State>(HandlerStateField::decode(minor_key));
+ }
+ if (op) {
+ *op = static_cast<Token::Value>(OpField::decode(minor_key) + Token::EQ);
+ }
}
case CompareIC::UNINITIALIZED:
GenerateMiss(masm);
break;
- case CompareIC::SMIS:
+ case CompareIC::SMI:
GenerateSmis(masm);
break;
- case CompareIC::HEAP_NUMBERS:
+ case CompareIC::HEAP_NUMBER:
GenerateHeapNumbers(masm);
break;
- case CompareIC::STRINGS:
+ case CompareIC::STRING:
GenerateStrings(masm);
break;
- case CompareIC::SYMBOLS:
+ case CompareIC::SYMBOL:
GenerateSymbols(masm);
break;
- case CompareIC::OBJECTS:
+ case CompareIC::OBJECT:
GenerateObjects(masm);
break;
case CompareIC::KNOWN_OBJECTS:
ASSERT(*known_map_ != NULL);
GenerateKnownObjects(masm);
break;
- default:
- UNREACHABLE();
+ case CompareIC::GENERIC:
+ GenerateGeneric(masm);
+ break;
}
}
};
+class BinaryOpStub: public CodeStub {
+ public:
+ BinaryOpStub(Token::Value op, OverwriteMode mode)
+ : op_(op),
+ mode_(mode),
+ platform_specific_bit_(false),
+ left_type_(BinaryOpIC::UNINITIALIZED),
+ right_type_(BinaryOpIC::UNINITIALIZED),
+ result_type_(BinaryOpIC::UNINITIALIZED) {
+ Initialize();
+ ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+ }
+
+ BinaryOpStub(
+ int key,
+ BinaryOpIC::TypeInfo left_type,
+ BinaryOpIC::TypeInfo right_type,
+ BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ platform_specific_bit_(PlatformSpecificBits::decode(key)),
+ left_type_(left_type),
+ right_type_(right_type),
+ result_type_(result_type) { }
+
+ static void decode_types_from_minor_key(int minor_key,
+ BinaryOpIC::TypeInfo* left_type,
+ BinaryOpIC::TypeInfo* right_type,
+ BinaryOpIC::TypeInfo* result_type) {
+ *left_type =
+ static_cast<BinaryOpIC::TypeInfo>(LeftTypeBits::decode(minor_key));
+ *right_type =
+ static_cast<BinaryOpIC::TypeInfo>(RightTypeBits::decode(minor_key));
+ *result_type =
+ static_cast<BinaryOpIC::TypeInfo>(ResultTypeBits::decode(minor_key));
+ }
+
+ static Token::Value decode_op_from_minor_key(int minor_key) {
+ return static_cast<Token::Value>(OpBits::decode(minor_key));
+ }
+
+ enum SmiCodeGenerateHeapNumberResults {
+ ALLOW_HEAPNUMBER_RESULTS,
+ NO_HEAPNUMBER_RESULTS
+ };
+
+ private:
+ Token::Value op_;
+ OverwriteMode mode_;
+ bool platform_specific_bit_; // Indicates SSE3 on IA32, VFP2 on ARM.
+
+ // Operand type information determined at runtime.
+ BinaryOpIC::TypeInfo left_type_;
+ BinaryOpIC::TypeInfo right_type_;
+ BinaryOpIC::TypeInfo result_type_;
+
+ virtual void PrintName(StringStream* stream);
+
+ // Minor key encoding in 19 bits TTTRRRLLLSOOOOOOOMM.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 7> {};
+ class PlatformSpecificBits: public BitField<bool, 9, 1> {};
+ class LeftTypeBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
+ class RightTypeBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
+ class ResultTypeBits: public BitField<BinaryOpIC::TypeInfo, 16, 3> {};
+
+ Major MajorKey() { return BinaryOp; }
+ int MinorKey() {
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | PlatformSpecificBits::encode(platform_specific_bit_)
+ | LeftTypeBits::encode(left_type_)
+ | RightTypeBits::encode(right_type_)
+ | ResultTypeBits::encode(result_type_);
+ }
+
+
+ // Platform-independent implementation.
+ void Generate(MacroAssembler* masm);
+ void GenerateCallRuntime(MacroAssembler* masm);
+
+ // Platform-independent signature, platform-specific implementation.
+ void Initialize();
+ void GenerateAddStrings(MacroAssembler* masm);
+ void GenerateBothStringStub(MacroAssembler* masm);
+ void GenerateGeneric(MacroAssembler* masm);
+ void GenerateGenericStub(MacroAssembler* masm);
+ void GenerateHeapNumberStub(MacroAssembler* masm);
+ void GenerateInt32Stub(MacroAssembler* masm);
+ void GenerateLoadArguments(MacroAssembler* masm);
+ void GenerateOddballStub(MacroAssembler* masm);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateReturn(MacroAssembler* masm);
+ void GenerateSmiStub(MacroAssembler* masm);
+ void GenerateStringStub(MacroAssembler* masm);
+ void GenerateTypeTransition(MacroAssembler* masm);
+ void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
+ void GenerateUninitializedStub(MacroAssembler* masm);
+
+ // Entirely platform-specific methods are defined as static helper
+ // functions in the <arch>/code-stubs-<arch>.cc files.
+
+ virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+
+ virtual InlineCacheState GetICState() {
+ return BinaryOpIC::ToState(Max(left_type_, right_type_));
+ }
+
+ virtual void FinishCode(Handle<Code> code) {
+ code->set_stub_info(MinorKey());
+ }
+
+ friend class CodeGenerator;
+};
+
+
class ICCompareStub: public CodeStub {
public:
- ICCompareStub(Token::Value op, CompareIC::State state)
- : op_(op), state_(state) {
+ ICCompareStub(Token::Value op,
+ CompareIC::State left,
+ CompareIC::State right,
+ CompareIC::State handler)
+ : op_(op),
+ left_(left),
+ right_(right),
+ state_(handler) {
ASSERT(Token::IsCompareOp(op));
}
void set_known_map(Handle<Map> map) { known_map_ = map; }
+ static void DecodeMinorKey(int minor_key,
+ CompareIC::State* left_state,
+ CompareIC::State* right_state,
+ CompareIC::State* handler_state,
+ Token::Value* op);
+
+ static CompareIC::State CompareState(int minor_key) {
+ return static_cast<CompareIC::State>(HandlerStateField::decode(minor_key));
+ }
+
private:
class OpField: public BitField<int, 0, 3> { };
- class StateField: public BitField<int, 3, 5> { };
+ class LeftStateField: public BitField<int, 3, 3> { };
+ class RightStateField: public BitField<int, 6, 3> { };
+ class HandlerStateField: public BitField<int, 9, 3> { };
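+ // Minor key encoding in 12 bits HHHRRRLLLOOO: handler state, right state,
+ // left state and (op - Token::EQ), from the most significant bits down.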
virtual void FinishCode(Handle<Code> code) {
- code->set_compare_state(state_);
- code->set_compare_operation(op_ - Token::EQ);
+ code->set_stub_info(MinorKey());
}
virtual CodeStub::Major MajorKey() { return CompareIC; }
void GenerateObjects(MacroAssembler* masm);
void GenerateMiss(MacroAssembler* masm);
void GenerateKnownObjects(MacroAssembler* masm);
+ void GenerateGeneric(MacroAssembler* masm);
bool strict() const { return op_ == Token::EQ_STRICT; }
Condition GetCondition() const { return CompareIC::ComputeCondition(op_); }
virtual bool UseSpecialCache() { return state_ == CompareIC::KNOWN_OBJECTS; }
Token::Value op_;
+ CompareIC::State left_;
+ CompareIC::State right_;
CompareIC::State state_;
Handle<Map> known_map_;
};
-// Flags that control the compare stub code generation.
-enum CompareFlags {
- NO_COMPARE_FLAGS = 0,
- NO_SMI_COMPARE_IN_STUB = 1 << 0,
- NO_NUMBER_COMPARE_IN_STUB = 1 << 1,
- CANT_BOTH_BE_NAN = 1 << 2
-};
-
-
-enum NaNInformation {
- kBothCouldBeNaN,
- kCantBothBeNaN
-};
-
-
-class CompareStub: public CodeStub {
- public:
- CompareStub(Condition cc,
- bool strict,
- CompareFlags flags,
- Register lhs,
- Register rhs) :
- cc_(cc),
- strict_(strict),
- never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0),
- include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
- include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
- lhs_(lhs),
- rhs_(rhs) { }
-
- CompareStub(Condition cc,
- bool strict,
- CompareFlags flags) :
- cc_(cc),
- strict_(strict),
- never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0),
- include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
- include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
- lhs_(no_reg),
- rhs_(no_reg) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Condition cc_;
- bool strict_;
- // Only used for 'equal' comparisons. Tells the stub that we already know
- // that at least one side of the comparison is not NaN. This allows the
- // stub to use object identity in the positive case. We ignore it when
- // generating the minor key for other comparisons to avoid creating more
- // stubs.
- bool never_nan_nan_;
- // Do generate the number comparison code in the stub. Stubs without number
- // comparison code is used when the number comparison has been inlined, and
- // the stub will be called if one of the operands is not a number.
- bool include_number_compare_;
-
- // Generate the comparison code for two smi operands in the stub.
- bool include_smi_compare_;
-
- // Register holding the left hand side of the comparison if the stub gives
- // a choice, no_reg otherwise.
-
- Register lhs_;
- // Register holding the right hand side of the comparison if the stub gives
- // a choice, no_reg otherwise.
- Register rhs_;
-
- // Encoding of the minor key in 16 bits.
- class StrictField: public BitField<bool, 0, 1> {};
- class NeverNanNanField: public BitField<bool, 1, 1> {};
- class IncludeNumberCompareField: public BitField<bool, 2, 1> {};
- class IncludeSmiCompareField: public BitField<bool, 3, 1> {};
- class RegisterField: public BitField<bool, 4, 1> {};
- class ConditionField: public BitField<int, 5, 11> {};
-
- Major MajorKey() { return Compare; }
-
- int MinorKey();
-
- virtual int GetCodeKind() { return Code::COMPARE_IC; }
- virtual void FinishCode(Handle<Code> code) {
- code->set_compare_state(CompareIC::GENERIC);
- }
-
- // Branch to the label if the given object isn't a symbol.
- void BranchIfNonSymbol(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch);
-
- // Unfortunately you have to run without snapshots to see most of these
- // names in the profile since most compare stubs end up in the snapshot.
- virtual void PrintName(StringStream* stream);
-};
-
-
class CEntryStub : public CodeStub {
public:
explicit CEntryStub(int result_size,
bool IsEmpty() const { return set_.IsEmpty(); }
bool Contains(Type type) const { return set_.Contains(type); }
+ bool ContainsAnyOf(Types types) const {
+ return set_.ContainsAnyOf(types.set_);
+ }
void Add(Type type) { set_.Add(type); }
byte ToByte() const { return set_.ToIntegral(); }
void Print(StringStream* stream) const;
}
// Now we rely on year and month being SMIs.
- return %DateMakeDay(year, month) + date - 1;
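+  // The "| 0" applies ToInt32, so year and month are truncated to integers
+  // before the runtime call.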
+ return %DateMakeDay(year | 0, month | 0) + date - 1;
}
code->set_check_type(RECEIVER_MAP_CHECK);
}
code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
- code->set_type_feedback_info(undefined_value(), SKIP_WRITE_BARRIER);
+ code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_gc_metadata(Smi::FromInt(0));
code->set_ic_age(global_ic_age_);
}
+void HValue::InferRepresentation(HInferRepresentation* h_infer) {
+ ASSERT(CheckFlag(kFlexibleRepresentation));
+ Representation new_rep = RepresentationFromInputs();
+ UpdateRepresentation(new_rep, h_infer, "inputs");
+ new_rep = RepresentationFromUses();
+ UpdateRepresentation(new_rep, h_infer, "uses");
+}
+
+
+Representation HValue::RepresentationFromUses() {
+ if (HasNoUses()) return Representation::None();
+
+ // Array of use counts for each representation.
+ int use_count[Representation::kNumRepresentations] = { 0 };
+
+ for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+ HValue* use = it.value();
+ Representation rep = use->observed_input_representation(it.index());
+ if (rep.IsNone()) continue;
+ if (FLAG_trace_representation) {
+ PrintF("#%d %s is used by #%d %s as %s%s\n",
+ id(), Mnemonic(), use->id(), use->Mnemonic(), rep.Mnemonic(),
+ (use->CheckFlag(kTruncatingToInt32) ? "-trunc" : ""));
+ }
+ use_count[rep.kind()] += use->LoopWeight();
+ }
+ if (IsPhi()) HPhi::cast(this)->AddIndirectUsesTo(&use_count[0]);
+ int tagged_count = use_count[Representation::kTagged];
+ int double_count = use_count[Representation::kDouble];
+ int int32_count = use_count[Representation::kInteger32];
+
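+  // Prefer the most general representation any use observed:
+  // Tagged over Double over Integer32.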
+ if (tagged_count > 0) return Representation::Tagged();
+ if (double_count > 0) return Representation::Double();
+ if (int32_count > 0) return Representation::Integer32();
+
+ return Representation::None();
+}
+
+
+void HValue::UpdateRepresentation(Representation new_rep,
+ HInferRepresentation* h_infer,
+ const char* reason) {
+ Representation r = representation();
+ if (new_rep.is_more_general_than(r)) {
+ // When an HConstant is marked "not convertible to integer", then
+ // never try to represent it as an integer.
+ if (new_rep.IsInteger32() && !IsConvertibleToInteger()) {
+ new_rep = Representation::Tagged();
+ if (FLAG_trace_representation) {
+ PrintF("Changing #%d %s representation %s -> %s because it's NCTI"
+ " (%s want i)\n",
+ id(), Mnemonic(), r.Mnemonic(), new_rep.Mnemonic(), reason);
+ }
+ } else {
+ if (FLAG_trace_representation) {
+ PrintF("Changing #%d %s representation %s -> %s based on %s\n",
+ id(), Mnemonic(), r.Mnemonic(), new_rep.Mnemonic(), reason);
+ }
+ }
+ ChangeRepresentation(new_rep);
+ AddDependantsToWorklist(h_infer);
+ }
+}
+
+
+void HValue::AddDependantsToWorklist(HInferRepresentation* h_infer) {
+ for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+ h_infer->AddToWorklist(it.value());
+ }
+ for (int i = 0; i < OperandCount(); ++i) {
+ h_infer->AddToWorklist(OperandAt(i));
+ }
+}
+
+
static int32_t ConvertAndSetOverflow(int64_t result, bool* overflow) {
if (result > kMaxInt) {
*overflow = true;
bool HValue::CheckUsesForFlag(Flag f) {
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+ if (it.value()->IsSimulate()) continue;
if (!it.value()->CheckFlag(f)) return false;
}
return true;
}
+Representation HBranch::observed_input_representation(int index) {
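+  // Feedback types that can only be handled as tagged values (undefined,
+  // null, objects, strings) force a Tagged input; heap-number feedback maps
+  // to Double and smi feedback to Integer32.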
+ static const ToBooleanStub::Types tagged_types(
+ ToBooleanStub::UNDEFINED |
+ ToBooleanStub::NULL_TYPE |
+ ToBooleanStub::SPEC_OBJECT |
+ ToBooleanStub::STRING);
+ if (expected_input_types_.ContainsAnyOf(tagged_types)) {
+ return Representation::Tagged();
+ } else if (expected_input_types_.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ return Representation::Double();
+ } else if (expected_input_types_.Contains(ToBooleanStub::SMI)) {
+ return Representation::Integer32();
+ } else {
+ return Representation::None();
+ }
+}
+
+
void HCompareMap::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" (%p)", *map());
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* value = it.value();
if (!value->IsPhi()) {
- Representation rep = value->ObservedInputRepresentation(it.index());
+ Representation rep = value->observed_input_representation(it.index());
non_phi_uses_[rep.kind()] += value->LoopWeight();
if (FLAG_trace_representation) {
- PrintF("%d %s is used by %d %s as %s\n",
- this->id(),
- this->Mnemonic(),
- value->id(),
- value->Mnemonic(),
- rep.Mnemonic());
+ PrintF("#%d Phi is used by real #%d %s as %s\n",
+ id(), value->id(), value->Mnemonic(), rep.Mnemonic());
}
}
}
void HPhi::AddNonPhiUsesFrom(HPhi* other) {
if (FLAG_trace_representation) {
- PrintF("adding to %d %s uses of %d %s: i%d d%d t%d\n",
- this->id(),
- this->Mnemonic(),
- other->id(),
- other->Mnemonic(),
+ PrintF("adding to #%d Phi uses of #%d Phi: i%d d%d t%d\n",
+ id(), other->id(),
other->non_phi_uses_[Representation::kInteger32],
other->non_phi_uses_[Representation::kDouble],
other->non_phi_uses_[Representation::kTagged]);
}
-void HPhi::ResetInteger32Uses() {
- non_phi_uses_[Representation::kInteger32] = 0;
- indirect_uses_[Representation::kInteger32] = 0;
+void HSimulate::MergeInto(HSimulate* other) {
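+  // Fold this simulate into |other|: assigned values are copied over
+  // directly; pushed values cancel against |other|'s pending pops where
+  // possible and are appended otherwise; remaining pops are transferred last.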
+ for (int i = 0; i < values_.length(); ++i) {
+ HValue* value = values_[i];
+ if (HasAssignedIndexAt(i)) {
+ other->AddAssignedValue(GetAssignedIndexAt(i), value);
+ } else {
+ if (other->pop_count_ > 0) {
+ other->pop_count_--;
+ } else {
+ other->AddPushedValue(value);
+ }
+ }
+ }
+ other->pop_count_ += pop_count();
}
if (pop_count_ > 0) stream->Add(" pop %d", pop_count_);
if (values_.length() > 0) {
if (pop_count_ > 0) stream->Add(" /");
- for (int i = 0; i < values_.length(); ++i) {
+ for (int i = values_.length() - 1; i >= 0; --i) {
if (i > 0) stream->Add(",");
if (HasAssignedIndexAt(i)) {
stream->Add(" var[%d] = ", GetAssignedIndexAt(i));
: handle_(handle),
has_int32_value_(false),
has_double_value_(false) {
- set_representation(r);
SetFlag(kUseGVN);
if (handle_->IsNumber()) {
double n = handle_->Number();
double_value_ = n;
has_double_value_ = true;
}
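+  // If no representation was requested, derive one from the constant's value.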
+ if (r.IsNone()) {
+ if (has_int32_value_) {
+ r = Representation::Integer32();
+ } else if (has_double_value_) {
+ r = Representation::Double();
+ } else {
+ r = Representation::Tagged();
+ }
+ }
+ set_representation(r);
}
}
+void HBinaryOperation::InferRepresentation(HInferRepresentation* h_infer) {
+ ASSERT(CheckFlag(kFlexibleRepresentation));
+ Representation new_rep = RepresentationFromInputs();
+ UpdateRepresentation(new_rep, h_infer, "inputs");
+ // When the operation has information about its own output type, don't look
+ // at uses.
+ if (!observed_output_representation_.IsNone()) return;
+ new_rep = RepresentationFromUses();
+ UpdateRepresentation(new_rep, h_infer, "uses");
+}
+
+
+Representation HBinaryOperation::RepresentationFromInputs() {
+ // Determine the worst case of observed input representations and
+ // the currently assumed output representation.
+ Representation rep = representation();
+ if (observed_output_representation_.is_more_general_than(rep)) {
+ rep = observed_output_representation_;
+ }
+ for (int i = 1; i <= 2; ++i) {
+ Representation input_rep = observed_input_representation(i);
+ if (input_rep.is_more_general_than(rep)) rep = input_rep;
+ }
+  // If either of the actual input representations is more general than what
+  // we have so far, but not Tagged, use that representation instead.
+ Representation left_rep = left()->representation();
+ Representation right_rep = right()->representation();
+
+ if (left_rep.is_more_general_than(rep) &&
+ left()->CheckFlag(kFlexibleRepresentation)) {
+ rep = left_rep;
+ }
+ if (right_rep.is_more_general_than(rep) &&
+ right()->CheckFlag(kFlexibleRepresentation)) {
+ rep = right_rep;
+ }
+ return rep;
+}
+
+
+void HBinaryOperation::AssumeRepresentation(Representation r) {
+ set_observed_input_representation(r, r);
+ HValue::AssumeRepresentation(r);
+}
+
+
Range* HBitwise::InferRange(Zone* zone) {
if (op() == Token::BIT_XOR) return HValue::InferRange(zone);
const int32_t kDefaultMask = static_cast<int32_t>(0xffffffff);
}
-void HCompareIDAndBranch::SetInputRepresentation(Representation r) {
- input_representation_ = r;
- if (r.IsDouble()) {
+void HCompareIDAndBranch::InferRepresentation(HInferRepresentation* h_infer) {
+ Representation rep = Representation::None();
+ Representation left_rep = left()->representation();
+ Representation right_rep = right()->representation();
+ bool observed_integers =
+ observed_input_representation(0).IsInteger32() &&
+ observed_input_representation(1).IsInteger32();
+ bool inputs_are_not_doubles =
+ !left_rep.IsDouble() && !right_rep.IsDouble();
+ if (observed_integers && inputs_are_not_doubles) {
+ rep = Representation::Integer32();
+ } else {
+ rep = Representation::Double();
// According to the ES5 spec (11.9.3, 11.8.5), Equality comparisons (==, ===
// and !=) have special handling of undefined, e.g. undefined == undefined
// is 'true'. Relational comparisons have a different semantic, first
if (!Token::IsOrderedRelationalCompareOp(token_)) {
SetFlag(kDeoptimizeOnUndefined);
}
- } else {
- ASSERT(r.IsInteger32());
}
+ ChangeRepresentation(rep);
}
}
-Representation HPhi::InferredRepresentation() {
+void HPhi::InferRepresentation(HInferRepresentation* h_infer) {
+ ASSERT(CheckFlag(kFlexibleRepresentation));
+ // If there are non-Phi uses, and all of them have observed the same
+  // representation, then that's what this Phi is going to use.
+ Representation new_rep = RepresentationObservedByAllNonPhiUses();
+ if (!new_rep.IsNone()) {
+ UpdateRepresentation(new_rep, h_infer, "unanimous use observations");
+ return;
+ }
+ new_rep = RepresentationFromInputs();
+ UpdateRepresentation(new_rep, h_infer, "inputs");
+ new_rep = RepresentationFromUses();
+ UpdateRepresentation(new_rep, h_infer, "uses");
+ new_rep = RepresentationFromUseRequirements();
+ UpdateRepresentation(new_rep, h_infer, "use requirements");
+}
+
+
+Representation HPhi::RepresentationObservedByAllNonPhiUses() {
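+  // Returns the representation observed by all non-Phi uses, provided there
+  // are at least two such uses and they all agree; otherwise returns None.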
+ int non_phi_use_count = 0;
+ for (int i = Representation::kInteger32;
+ i < Representation::kNumRepresentations; ++i) {
+ non_phi_use_count += non_phi_uses_[i];
+ }
+ if (non_phi_use_count <= 1) return Representation::None();
+ for (int i = 0; i < Representation::kNumRepresentations; ++i) {
+ if (non_phi_uses_[i] == non_phi_use_count) {
+ return Representation::FromKind(static_cast<Representation::Kind>(i));
+ }
+ }
+ return Representation::None();
+}
+
+
+Representation HPhi::RepresentationFromInputs() {
bool double_occurred = false;
bool int32_occurred = false;
for (int i = 0; i < OperandCount(); ++i) {
HPhi* hint_value = HUnknownOSRValue::cast(value)->incoming_value();
if (hint_value != NULL) {
Representation hint = hint_value->representation();
+ if (hint.IsTagged()) return hint;
if (hint.IsDouble()) double_occurred = true;
if (hint.IsInteger32()) int32_occurred = true;
}
return Representation::Tagged();
}
} else {
- return Representation::Tagged();
+ if (value->IsPhi() && !IsConvertibleToInteger()) {
+ return Representation::Tagged();
+ }
}
}
}
}
+Representation HPhi::RepresentationFromUseRequirements() {
+ Representation all_uses_require = Representation::None();
+ bool all_uses_require_the_same = true;
+ for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+ // We check for observed_input_representation elsewhere.
+ Representation use_rep =
+ it.value()->RequiredInputRepresentation(it.index());
+ // No useful info from this use -> look at the next one.
+ if (use_rep.IsNone()) {
+ continue;
+ }
+ if (use_rep.Equals(all_uses_require)) {
+ continue;
+ }
+ // This use's representation contradicts what we've seen so far.
+ if (!all_uses_require.IsNone()) {
+ ASSERT(!use_rep.Equals(all_uses_require));
+ all_uses_require_the_same = false;
+ break;
+ }
+ // Otherwise, initialize observed representation.
+ all_uses_require = use_rep;
+ }
+ if (all_uses_require_the_same) {
+ return all_uses_require;
+ }
+
+ return Representation::None();
+}
+
+
// Node-specific verification code is only included in debug mode.
#ifdef DEBUG
// Forward declarations.
class HBasicBlock;
class HEnvironment;
+class HInferRepresentation;
class HInstruction;
class HLoopInformation;
class HValue;
public:
enum Kind {
kNone,
- kTagged,
- kDouble,
kInteger32,
+ kDouble,
+ kTagged,
kExternal,
kNumRepresentations
};
static Representation Double() { return Representation(kDouble); }
static Representation External() { return Representation(kExternal); }
+ static Representation FromKind(Kind kind) { return Representation(kind); }
+
bool Equals(const Representation& other) {
return kind_ == other.kind_;
}
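+  // Generality follows the Kind ordering above:
+  // None < Integer32 < Double < Tagged.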
+ bool is_more_general_than(const Representation& other) {
+ ASSERT(kind_ != kExternal);
+ ASSERT(other.kind_ != kExternal);
+ return kind_ > other.kind_;
+ }
+
Kind kind() const { return static_cast<Kind>(kind_); }
bool IsNone() const { return kind_ == kNone; }
bool IsTagged() const { return kind_ == kTagged; }
virtual bool EmitAtUses() { return false; }
Representation representation() const { return representation_; }
void ChangeRepresentation(Representation r) {
- // Representation was already set and is allowed to be changed.
- ASSERT(!r.IsNone());
ASSERT(CheckFlag(kFlexibleRepresentation));
RepresentationChanged(r);
representation_ = r;
+ if (r.IsTagged()) {
+ // Tagged is the bottom of the lattice, don't go any further.
+ ClearFlag(kFlexibleRepresentation);
+ }
}
- void AssumeRepresentation(Representation r);
+ virtual void AssumeRepresentation(Representation r);
virtual bool IsConvertibleToInteger() const { return true; }
void ComputeInitialRange(Zone* zone);
// Representation helpers.
- virtual Representation RequiredInputRepresentation(int index) = 0;
-
- virtual Representation InferredRepresentation() {
- return representation();
- }
-
- // Type feedback access.
- virtual Representation ObservedInputRepresentation(int index) {
- return RequiredInputRepresentation(index);
+ virtual Representation observed_input_representation(int index) {
+ return Representation::None();
}
+ virtual Representation RequiredInputRepresentation(int index) = 0;
+ virtual void InferRepresentation(HInferRepresentation* h_infer);
// This gives the instruction an opportunity to replace itself with an
// instruction that does the same in some better way. To replace an
UNREACHABLE();
return false;
}
+
+ virtual Representation RepresentationFromInputs() {
+ return representation();
+ }
+ Representation RepresentationFromUses();
+ virtual void UpdateRepresentation(Representation new_rep,
+ HInferRepresentation* h_infer,
+ const char* reason);
+ void AddDependantsToWorklist(HInferRepresentation* h_infer);
+
virtual void RepresentationChanged(Representation to) { }
+
virtual Range* InferRange(Zone* zone);
virtual void DeleteFromGraph() = 0;
virtual void InternalSetOperandAt(int index, HValue* value) = 0;
}
void set_representation(Representation r) {
- // Representation is set-once.
ASSERT(representation_.IsNone() && !r.IsNone());
representation_ = r;
}
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
+ virtual Representation observed_input_representation(int index);
ToBooleanStub::Types expected_input_types() const {
return expected_input_types_;
};
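+// Whether an HSimulate may later be folded into the next HSimulate in its
+// block by HGraph::MergeRemovableSimulates() (REMOVABLE_SIMULATE), or must
+// be kept as-is (FIXED_SIMULATE).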
+enum RemovableSimulate {
+ REMOVABLE_SIMULATE,
+ FIXED_SIMULATE
+};
+
+
class HSimulate: public HInstruction {
public:
- HSimulate(BailoutId ast_id, int pop_count, Zone* zone)
+ HSimulate(BailoutId ast_id,
+ int pop_count,
+ Zone* zone,
+ RemovableSimulate removable)
: ast_id_(ast_id),
pop_count_(pop_count),
values_(2, zone),
assigned_indexes_(2, zone),
- zone_(zone) {}
+ zone_(zone),
+ removable_(removable) {}
virtual ~HSimulate() {}
virtual void PrintDataTo(StringStream* stream);
return Representation::None();
}
+ void MergeInto(HSimulate* other);
+ bool is_candidate_for_removal() { return removable_ == REMOVABLE_SIMULATE; }
+
DECLARE_CONCRETE_INSTRUCTION(Simulate)
#ifdef DEBUG
ZoneList<HValue*> values_;
ZoneList<int> assigned_indexes_;
Zone* zone_;
+ RemovableSimulate removable_;
};
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Integer32();
}
+ virtual Representation observed_input_representation(int index) {
+ return Representation::Integer32();
+ }
virtual HType CalculateInferredType();
virtual HValue* Canonicalize();
set_representation(Representation::Integer32());
break;
case kMathAbs:
- set_representation(Representation::Tagged());
+ // Not setting representation here: it is None intentionally.
SetFlag(kFlexibleRepresentation);
SetGVNFlag(kChangesNewSpacePromotion);
break;
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
+
virtual void PrintDataTo(StringStream* stream);
virtual HType CalculateInferredType();
indirect_uses_[i] = 0;
}
ASSERT(merged_index >= 0);
- set_representation(Representation::Tagged());
SetFlag(kFlexibleRepresentation);
}
- virtual Representation InferredRepresentation();
+ virtual Representation RepresentationFromInputs();
virtual Range* InferRange(Zone* zone);
+ virtual void InferRepresentation(HInferRepresentation* h_infer);
+ Representation RepresentationObservedByAllNonPhiUses();
+ Representation RepresentationFromUseRequirements();
virtual Representation RequiredInputRepresentation(int index) {
return representation();
}
bool AllOperandsConvertibleToInteger() {
for (int i = 0; i < OperandCount(); ++i) {
if (!OperandAt(i)->IsConvertibleToInteger()) {
+ if (FLAG_trace_representation) {
+ HValue* input = OperandAt(i);
+ PrintF("#%d %s: Input #%d %s at %d is NCTI\n",
+ id(), Mnemonic(), input->id(), input->Mnemonic(), i);
+ }
return false;
}
}
return true;
}
- void ResetInteger32Uses();
-
protected:
virtual void DeleteFromGraph();
virtual void InternalSetOperandAt(int index, HValue* value) {
class HBinaryOperation: public HTemplateInstruction<3> {
public:
- HBinaryOperation(HValue* context, HValue* left, HValue* right) {
+ HBinaryOperation(HValue* context, HValue* left, HValue* right)
+ : observed_output_representation_(Representation::None()) {
ASSERT(left != NULL && right != NULL);
SetOperandAt(0, context);
SetOperandAt(1, left);
SetOperandAt(2, right);
+ observed_input_representation_[0] = Representation::None();
+ observed_input_representation_[1] = Representation::None();
}
HValue* context() { return OperandAt(0); }
return right();
}
+ void set_observed_input_representation(Representation left,
+ Representation right) {
+ observed_input_representation_[0] = left;
+ observed_input_representation_[1] = right;
+ }
+
+ virtual void initialize_output_representation(Representation observed) {
+ observed_output_representation_ = observed;
+ }
+
+ virtual Representation observed_input_representation(int index) {
+ if (index == 0) return Representation::Tagged();
+ return observed_input_representation_[index - 1];
+ }
+
+ virtual void InferRepresentation(HInferRepresentation* h_infer);
+ virtual Representation RepresentationFromInputs();
+ virtual void AssumeRepresentation(Representation r);
+
virtual bool IsCommutative() const { return false; }
virtual void PrintDataTo(StringStream* stream);
DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation)
+
+ private:
+ Representation observed_input_representation_[2];
+ Representation observed_output_representation_;
};
}
return Representation::Integer32();
}
+ virtual Representation observed_input_representation(int index) {
+ return Representation::Integer32();
+ }
virtual void PrintDataTo(StringStream* stream);
public:
HBitwiseBinaryOperation(HValue* context, HValue* left, HValue* right)
: HBinaryOperation(context, left, right) {
- set_representation(Representation::Tagged());
SetFlag(kFlexibleRepresentation);
+ SetFlag(kTruncatingToInt32);
SetAllSideEffects();
- observed_input_representation_[0] = Representation::Tagged();
- observed_input_representation_[1] = Representation::None();
- observed_input_representation_[2] = Representation::None();
}
virtual Representation RequiredInputRepresentation(int index) {
if (!to.IsTagged()) {
ASSERT(to.IsInteger32());
ClearAllSideEffects();
- SetFlag(kTruncatingToInt32);
SetFlag(kUseGVN);
+ } else {
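+      // Changing (back) to Tagged means the operation is performed
+      // generically, so restore the side-effect flags and stop GVN'ing it.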
+ SetAllSideEffects();
+ ClearFlag(kUseGVN);
}
}
- virtual HType CalculateInferredType();
-
- virtual Representation ObservedInputRepresentation(int index) {
- return observed_input_representation_[index];
+ virtual void UpdateRepresentation(Representation new_rep,
+ HInferRepresentation* h_infer,
+ const char* reason) {
+ // We only generate either int32 or generic tagged bitwise operations.
+ if (new_rep.IsDouble()) new_rep = Representation::Integer32();
+ HValue::UpdateRepresentation(new_rep, h_infer, reason);
}
- void InitializeObservedInputRepresentation(Representation r) {
- observed_input_representation_[1] = r;
- observed_input_representation_[2] = r;
+ virtual void initialize_output_representation(Representation observed) {
+ if (observed.IsDouble()) observed = Representation::Integer32();
+ HBinaryOperation::initialize_output_representation(observed);
}
+ virtual HType CalculateInferredType();
+
DECLARE_ABSTRACT_INSTRUCTION(BitwiseBinaryOperation)
private:
virtual bool IsDeletable() const { return true; }
-
- Representation observed_input_representation_[3];
};
public:
HArithmeticBinaryOperation(HValue* context, HValue* left, HValue* right)
: HBinaryOperation(context, left, right) {
- set_representation(Representation::Tagged());
- SetFlag(kFlexibleRepresentation);
SetAllSideEffects();
+ SetFlag(kFlexibleRepresentation);
}
virtual void RepresentationChanged(Representation to) {
- if (!to.IsTagged()) {
+ if (to.IsTagged()) {
+ SetAllSideEffects();
+ ClearFlag(kUseGVN);
+ } else {
ClearAllSideEffects();
SetFlag(kUseGVN);
}
: representation();
}
- virtual Representation InferredRepresentation() {
- if (left()->representation().Equals(right()->representation())) {
- return left()->representation();
- }
- return HValue::InferredRepresentation();
- }
-
private:
virtual bool IsDeletable() const { return true; }
};
}
virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- Representation GetInputRepresentation() const {
- return Representation::Tagged();
+ return index == 0
+ ? Representation::Tagged()
+ : representation();
}
Token::Value token() const { return token_; }
public:
HCompareIDAndBranch(HValue* left, HValue* right, Token::Value token)
: token_(token) {
+ SetFlag(kFlexibleRepresentation);
ASSERT(Token::IsCompareOp(token));
SetOperandAt(0, left);
SetOperandAt(1, right);
HValue* right() { return OperandAt(1); }
Token::Value token() const { return token_; }
- void SetInputRepresentation(Representation r);
- Representation GetInputRepresentation() const {
- return input_representation_;
+ void set_observed_input_representation(Representation left,
+ Representation right) {
+ observed_input_representation_[0] = left;
+ observed_input_representation_[1] = right;
}
+ virtual void InferRepresentation(HInferRepresentation* h_infer);
+
virtual Representation RequiredInputRepresentation(int index) {
- return input_representation_;
+ return representation();
+ }
+ virtual Representation observed_input_representation(int index) {
+ return observed_input_representation_[index];
}
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(CompareIDAndBranch)
private:
- Representation input_representation_;
+ Representation observed_input_representation_[2];
Token::Value token_;
};
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
+ virtual Representation observed_input_representation(int index) {
+ return Representation::Tagged();
+ }
DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch)
? Representation::Double()
: Representation::None();
}
+ virtual Representation observed_input_representation(int index) {
+ return RequiredInputRepresentation(index);
+ }
DECLARE_CONCRETE_INSTRUCTION(Power)
operation_(op) { }
virtual Representation RequiredInputRepresentation(int index) {
- return index == 0
- ? Representation::Tagged()
- : representation();
- }
+ return index == 0 ? Representation::Tagged()
+ : representation();
+ }
- virtual Representation InferredRepresentation() {
- if (left()->representation().IsInteger32() &&
- right()->representation().IsInteger32()) {
+ virtual Representation observed_input_representation(int index) {
+ return RequiredInputRepresentation(index);
+ }
+
+ virtual Representation RepresentationFromInputs() {
+ Representation left_rep = left()->representation();
+ Representation right_rep = right()->representation();
+ if ((left_rep.IsNone() || left_rep.IsInteger32()) &&
+ (right_rep.IsNone() || right_rep.IsInteger32())) {
return Representation::Integer32();
}
return Representation::Double();
return Representation::None();
}
+ virtual Representation observed_input_representation(int index) {
+ return RequiredInputRepresentation(index);
+ }
+
virtual void PrintDataTo(StringStream* stream);
bool RequiresHoleCheck() const;
} else {
SetGVNFlag(kChangesArrayElements);
}
+
+ // EXTERNAL_{UNSIGNED_,}{BYTE,SHORT,INT}_ELEMENTS are truncating.
+ if (elements_kind >= EXTERNAL_BYTE_ELEMENTS &&
+ elements_kind <= EXTERNAL_UNSIGNED_INT_ELEMENTS) {
+ SetFlag(kTruncatingToInt32);
+ }
}
virtual Representation RequiredInputRepresentation(int index) {
bool is_external() const {
return IsExternalArrayElementsKind(elements_kind());
}
+
+ virtual Representation observed_input_representation(int index) {
+ if (index < 2) return RequiredInputRepresentation(index);
+ if (IsDoubleOrFloatElementsKind(elements_kind())) {
+ return Representation::Double();
+ }
+ if (is_external()) {
+ return Representation::Integer32();
+ }
+ // For fast object elements kinds, don't assume anything.
+ return Representation::None();
+ }
+
HValue* elements() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
}
-HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id) {
+HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id,
+ RemovableSimulate removable) {
ASSERT(HasEnvironment());
HEnvironment* environment = last_environment();
ASSERT(ast_id.IsNone() ||
int push_count = environment->push_count();
int pop_count = environment->pop_count();
- HSimulate* instr = new(zone()) HSimulate(ast_id, pop_count, zone());
- for (int i = push_count - 1; i >= 0; --i) {
+ HSimulate* instr =
+ new(zone()) HSimulate(ast_id, pop_count, zone(), removable);
+ // Order of pushed values: newest (top of stack) first. This allows
+ // HSimulate::MergeInto() to easily append additional pushed values
+ // that are older (from further down the stack).
+ for (int i = 0; i < push_count; ++i) {
instr->AddPushedValue(environment->ExpressionStackAt(i));
}
for (int i = 0; i < environment->assigned_variables()->length(); ++i) {
void HRangeAnalysis::InferControlFlowRange(HCompareIDAndBranch* test,
HBasicBlock* dest) {
ASSERT((test->FirstSuccessor() == dest) == (test->SecondSuccessor() != dest));
- if (test->GetInputRepresentation().IsInteger32()) {
+ if (test->representation().IsInteger32()) {
Token::Value op = test->token();
if (test->SecondSuccessor() == dest) {
op = Token::NegateCompareOp(op);
}
-class HInferRepresentation BASE_EMBEDDED {
- public:
- explicit HInferRepresentation(HGraph* graph)
- : graph_(graph),
- worklist_(8, graph->zone()),
- in_worklist_(graph->GetMaximumValueID(), graph->zone()) { }
-
- void Analyze();
-
- private:
- Representation TryChange(HValue* current);
- void AddToWorklist(HValue* current);
- void InferBasedOnInputs(HValue* current);
- void AddDependantsToWorklist(HValue* current);
- void InferBasedOnUses(HValue* current);
-
- Zone* zone() const { return graph_->zone(); }
-
- HGraph* graph_;
- ZoneList<HValue*> worklist_;
- BitVector in_worklist_;
-};
-
-
void HInferRepresentation::AddToWorklist(HValue* current) {
- if (current->representation().IsSpecialization()) return;
+ if (current->representation().IsTagged()) return;
if (!current->CheckFlag(HValue::kFlexibleRepresentation)) return;
if (in_worklist_.Contains(current->id())) return;
worklist_.Add(current, zone());
}
-// This method tries to specialize the representation type of the value
-// given as a parameter. The value is asked to infer its representation type
-// based on its inputs. If the inferred type is more specialized, then this
-// becomes the new representation type of the node.
-void HInferRepresentation::InferBasedOnInputs(HValue* current) {
- Representation r = current->representation();
- if (r.IsSpecialization()) return;
- ASSERT(current->CheckFlag(HValue::kFlexibleRepresentation));
- Representation inferred = current->InferredRepresentation();
- if (inferred.IsSpecialization()) {
- if (FLAG_trace_representation) {
- PrintF("Changing #%d representation %s -> %s based on inputs\n",
- current->id(),
- r.Mnemonic(),
- inferred.Mnemonic());
- }
- current->ChangeRepresentation(inferred);
- AddDependantsToWorklist(current);
- }
-}
-
-
-void HInferRepresentation::AddDependantsToWorklist(HValue* value) {
- for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
- AddToWorklist(it.value());
- }
- for (int i = 0; i < value->OperandCount(); ++i) {
- AddToWorklist(value->OperandAt(i));
- }
-}
-
-
-// This method calculates whether specializing the representation of the value
-// given as the parameter has a benefit in terms of less necessary type
-// conversions. If there is a benefit, then the representation of the value is
-// specialized.
-void HInferRepresentation::InferBasedOnUses(HValue* value) {
- Representation r = value->representation();
- if (r.IsSpecialization() || value->HasNoUses()) return;
- ASSERT(value->CheckFlag(HValue::kFlexibleRepresentation));
- Representation new_rep = TryChange(value);
- if (!new_rep.IsNone()) {
- if (!value->representation().Equals(new_rep)) {
- if (FLAG_trace_representation) {
- PrintF("Changing #%d representation %s -> %s based on uses\n",
- value->id(),
- r.Mnemonic(),
- new_rep.Mnemonic());
- }
- value->ChangeRepresentation(new_rep);
- AddDependantsToWorklist(value);
- }
- }
-}
-
-
-Representation HInferRepresentation::TryChange(HValue* value) {
- // Array of use counts for each representation.
- int use_count[Representation::kNumRepresentations] = { 0 };
-
- for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
- Representation rep = use->ObservedInputRepresentation(it.index());
- if (rep.IsNone()) continue;
- if (FLAG_trace_representation) {
- PrintF("%d %s is used by %d %s as %s\n",
- value->id(),
- value->Mnemonic(),
- use->id(),
- use->Mnemonic(),
- rep.Mnemonic());
- }
- if (use->IsPhi()) HPhi::cast(use)->AddIndirectUsesTo(&use_count[0]);
- use_count[rep.kind()] += use->LoopWeight();
- }
- int tagged_count = use_count[Representation::kTagged];
- int double_count = use_count[Representation::kDouble];
- int int32_count = use_count[Representation::kInteger32];
- int non_tagged_count = double_count + int32_count;
-
- // If a non-loop phi has tagged uses, don't convert it to untagged.
- if (value->IsPhi() && !value->block()->IsLoopHeader() && tagged_count > 0) {
- return Representation::None();
- }
-
- // Prefer unboxing over boxing, the latter is more expensive.
- if (tagged_count > non_tagged_count) return Representation::None();
-
- // Prefer Integer32 over Double, if possible.
- if (int32_count > 0 && value->IsConvertibleToInteger()) {
- return Representation::Integer32();
- }
-
- if (double_count > 0) return Representation::Double();
-
- return Representation::None();
-}
-
-
void HInferRepresentation::Analyze() {
HPhase phase("H_Infer representations", graph_);
it.Advance()) {
HPhi* phi = phi_list->at(it.Current());
phi->set_is_convertible_to_integer(false);
- phi->ResetInteger32Uses();
}
}
while (!worklist_.is_empty()) {
HValue* current = worklist_.RemoveLast();
in_worklist_.Remove(current->id());
- InferBasedOnInputs(current);
- InferBasedOnUses(current);
+ current->InferRepresentation(this);
+ }
+
+ // Lastly: any instruction that we don't have representation information
+ // for defaults to Tagged.
+ for (int i = 0; i < graph_->blocks()->length(); ++i) {
+ HBasicBlock* block = graph_->blocks()->at(i);
+ const ZoneList<HPhi*>* phis = block->phis();
+ for (int j = 0; j < phis->length(); ++j) {
+ HPhi* phi = phis->at(j);
+ if (phi->representation().IsNone()) {
+ phi->ChangeRepresentation(Representation::Tagged());
+ }
+ }
+ for (HInstruction* current = block->first();
+ current != NULL; current = current->next()) {
+ if (current->representation().IsNone() &&
+ current->CheckFlag(HInstruction::kFlexibleRepresentation)) {
+ current->ChangeRepresentation(Representation::Tagged());
+ }
+ }
+ }
+}
+
+
+void HGraph::MergeRemovableSimulates() {
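+  // Walk each block, remembering the most recent removable HSimulate; when
+  // the next HSimulate is reached, fold the remembered one into it and
+  // delete it.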
+ for (int i = 0; i < blocks()->length(); ++i) {
+ HBasicBlock* block = blocks()->at(i);
+ // Always reset the folding candidate at the start of a block.
+ HSimulate* folding_candidate = NULL;
+ // Nasty heuristic: Never remove the first simulate in a block. This
+ // just so happens to have a beneficial effect on register allocation.
+ bool first = true;
+ for (HInstruction* current = block->first();
+ current != NULL; current = current->next()) {
+ if (current->IsLeaveInlined()) {
+ // Never fold simulates from inlined environments into simulates
+ // in the outer environment.
+ // (Before each HEnterInlined, there is a non-foldable HSimulate
+ // anyway, so we get the barrier in the other direction for free.)
+ if (folding_candidate != NULL) {
+ folding_candidate->DeleteAndReplaceWith(NULL);
+ }
+ folding_candidate = NULL;
+ continue;
+ }
+ // If we have an HSimulate and a candidate, perform the folding.
+ if (!current->IsSimulate()) continue;
+ if (first) {
+ first = false;
+ continue;
+ }
+ HSimulate* current_simulate = HSimulate::cast(current);
+ if (folding_candidate != NULL) {
+ folding_candidate->MergeInto(current_simulate);
+ folding_candidate->DeleteAndReplaceWith(NULL);
+ folding_candidate = NULL;
+ }
+ // Check if the current simulate is a candidate for folding.
+ if (current_simulate->previous()->HasObservableSideEffects() &&
+ !current_simulate->next()->IsSimulate()) {
+ continue;
+ }
+ if (!current_simulate->is_candidate_for_removal()) {
+ continue;
+ }
+ folding_candidate = current_simulate;
+ }
}
}
} else {
next = HInstruction::cast(use_value);
}
-
// For constants we try to make the representation change at compile
// time. When a representation change is not possible without loss of
// information we treat constants like normal instructions and insert the
if (value->IsConstant()) {
HConstant* constant = HConstant::cast(value);
// Try to create a new copy of the constant with the new representation.
- new_value = is_truncating
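+    // Truncation only makes sense when the target representation is
+    // Integer32.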
+ new_value = (is_truncating && to.IsInteger32())
? constant->CopyToTruncatedInt32(zone())
: constant->CopyToRepresentation(to, zone());
}
for (int i = 0; i < phi_list()->length(); i++) {
HPhi* phi = phi_list()->at(i);
if (!phi->CheckFlag(HValue::kTruncatingToInt32)) continue;
- if (!phi->CheckUsesForFlag(HValue::kTruncatingToInt32)) {
- phi->ClearFlag(HValue::kTruncatingToInt32);
- change = true;
+ for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+ // If a Phi is used as a non-truncating int32 or as a double,
+ // clear its "truncating" flag.
+ HValue* use = it.value();
+ Representation input_representation =
+ use->RequiredInputRepresentation(it.index());
+ if ((input_representation.IsInteger32() &&
+ !use->CheckFlag(HValue::kTruncatingToInt32)) ||
+ input_representation.IsDouble()) {
+ if (FLAG_trace_representation) {
+ PrintF("#%d Phi is not truncating because of #%d %s\n",
+ phi->id(), it.value()->id(), it.value()->Mnemonic());
+ }
+ phi->ClearFlag(HValue::kTruncatingToInt32);
+ change = true;
+ break;
+ }
}
}
}
// Process normal instructions.
HInstruction* current = blocks_[i]->first();
while (current != NULL) {
+ HInstruction* next = current->next();
InsertRepresentationChangesForValue(current);
- current = current->next();
+ current = next;
}
}
}
void EffectContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->IsControlInstruction());
owner()->AddInstruction(instr);
- if (instr->HasObservableSideEffects()) owner()->AddSimulate(ast_id);
+ if (instr->HasObservableSideEffects()) {
+ owner()->AddSimulate(ast_id, REMOVABLE_SIMULATE);
+ }
}
}
owner()->AddInstruction(instr);
owner()->Push(instr);
- if (instr->HasObservableSideEffects()) owner()->AddSimulate(ast_id);
+ if (instr->HasObservableSideEffects()) {
+ owner()->AddSimulate(ast_id, REMOVABLE_SIMULATE);
+ }
}
// this one isn't actually needed (and wouldn't work if it were targeted).
if (instr->HasObservableSideEffects()) {
builder->Push(instr);
- builder->AddSimulate(ast_id);
+ builder->AddSimulate(ast_id, REMOVABLE_SIMULATE);
builder->Pop();
}
BuildBranch(instr);
HInferRepresentation rep(this);
rep.Analyze();
+ // Remove HSimulate instructions that have turned out not to be needed
+ // after all by folding them into the following HSimulate.
+ // This must happen after inferring representations.
+ MergeRemovableSimulates();
+
MarkDeoptimizeOnUndefined();
InsertRepresentationChanges();
}
-void HGraphBuilder::AddSimulate(BailoutId ast_id) {
+void HGraphBuilder::AddSimulate(BailoutId ast_id, RemovableSimulate removable) {
ASSERT(current_block() != NULL);
- current_block()->AddSimulate(ast_id);
+ current_block()->AddSimulate(ast_id, removable);
}
!clause->label()->IsStringLiteral()) ||
(switch_type == SMI_SWITCH &&
!clause->label()->IsSmiLiteral())) {
- return Bailout("SwitchStatemnt: mixed label types are not supported");
+ return Bailout("SwitchStatement: mixed label types are not supported");
}
}
new(zone()) HCompareIDAndBranch(tag_value,
label_value,
Token::EQ_STRICT);
- compare_->SetInputRepresentation(Representation::Integer32());
+ compare_->set_observed_input_representation(
+ Representation::Integer32(), Representation::Integer32());
compare = compare_;
} else {
compare = new(zone()) HStringCompareAndBranch(context, tag_value,
- label_value,
- Token::EQ_STRICT);
+ label_value,
+ Token::EQ_STRICT);
}
compare->SetSuccessorAt(0, body_block);
// Check that we still have more keys.
HCompareIDAndBranch* compare_index =
new(zone()) HCompareIDAndBranch(index, limit, Token::LT);
- compare_index->SetInputRepresentation(Representation::Integer32());
+ compare_index->set_observed_input_representation(
+ Representation::Integer32(), Representation::Integer32());
HBasicBlock* loop_body = graph()->CreateBasicBlock();
HBasicBlock* loop_successor = graph()->CreateBasicBlock();
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
HConstant* instr =
- new(zone()) HConstant(expr->handle(), Representation::Tagged());
+ new(zone()) HConstant(expr->handle(), Representation::None());
return ast_context()->ReturnInstruction(instr, expr->id());
}
map));
}
AddInstruction(store);
- if (store->HasObservableSideEffects()) AddSimulate(key->id());
+ if (store->HasObservableSideEffects()) {
+ AddSimulate(key->id(), REMOVABLE_SIMULATE);
+ }
} else {
CHECK_ALIVE(VisitForEffect(value));
}
// unoptimized code).
if (instr->HasObservableSideEffects()) {
if (ast_context()->IsEffect()) {
- AddSimulate(expr->id());
+ AddSimulate(expr->id(), REMOVABLE_SIMULATE);
} else {
Push(value);
- AddSimulate(expr->id());
+ AddSimulate(expr->id(), REMOVABLE_SIMULATE);
Drop(1);
}
}
Push(value);
instr->set_position(expr->position());
AddInstruction(instr);
- if (instr->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
+ if (instr->HasObservableSideEffects()) {
+ AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
+ }
return ast_context()->ReturnValue(Pop());
} else {
&has_side_effects);
Push(value);
ASSERT(has_side_effects); // Stores always have side effects.
- AddSimulate(expr->AssignmentId());
+ AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
return ast_context()->ReturnValue(Pop());
}
}
new(zone()) HStoreGlobalCell(value, cell, lookup.GetPropertyDetails());
instr->set_position(position);
AddInstruction(instr);
- if (instr->HasObservableSideEffects()) AddSimulate(ast_id);
+ if (instr->HasObservableSideEffects()) {
+ AddSimulate(ast_id, REMOVABLE_SIMULATE);
+ }
} else {
HValue* context = environment()->LookupContext();
HGlobalObject* global_object = new(zone()) HGlobalObject(context);
instr->set_position(position);
AddInstruction(instr);
ASSERT(instr->HasObservableSideEffects());
- if (instr->HasObservableSideEffects()) AddSimulate(ast_id);
+ AddSimulate(ast_id, REMOVABLE_SIMULATE);
}
}
new(zone()) HStoreContextSlot(context, var->index(), mode, Top());
AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId());
+ AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
}
break;
}
load = BuildLoadNamedGeneric(object, name, prop);
}
PushAndAdd(load);
- if (load->HasObservableSideEffects()) AddSimulate(prop->LoadId());
+ if (load->HasObservableSideEffects()) {
+ AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
+ }
CHECK_ALIVE(VisitForValue(expr->value()));
HValue* right = Pop();
HInstruction* instr = BuildBinaryOperation(operation, left, right);
PushAndAdd(instr);
- if (instr->HasObservableSideEffects()) AddSimulate(operation->id());
+ if (instr->HasObservableSideEffects()) {
+ AddSimulate(operation->id(), REMOVABLE_SIMULATE);
+ }
HInstruction* store;
if (!monomorphic) {
// Drop the simulated receiver and value. Return the value.
Drop(2);
Push(instr);
- if (store->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
+ if (store->HasObservableSideEffects()) {
+ AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
+ }
return ast_context()->ReturnValue(Pop());
} else {
false, // is_store
&has_side_effects);
Push(load);
- if (has_side_effects) AddSimulate(prop->LoadId());
+ if (has_side_effects) AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
CHECK_ALIVE(VisitForValue(expr->value()));
HInstruction* instr = BuildBinaryOperation(operation, left, right);
PushAndAdd(instr);
- if (instr->HasObservableSideEffects()) AddSimulate(operation->id());
+ if (instr->HasObservableSideEffects()) {
+ AddSimulate(operation->id(), REMOVABLE_SIMULATE);
+ }
expr->RecordTypeFeedback(oracle(), zone());
HandleKeyedElementAccess(obj, key, instr, expr, expr->AssignmentId(),
Drop(3);
Push(instr);
ASSERT(has_side_effects); // Stores always have side effects.
- AddSimulate(expr->AssignmentId());
+ AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
return ast_context()->ReturnValue(Pop());
}
context, var->index(), mode, Top());
AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId());
+ AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
}
return ast_context()->ReturnValue(Pop());
}
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- if (!val->representation().IsInteger32()) {
- val = AddInstruction(new(zone()) HChange(
- val,
- Representation::Integer32(),
- true, // Truncate to int32.
- false)); // Don't deoptimize undefined (irrelevant here).
- }
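+      // External int element stores set the kTruncatingToInt32 flag, so the
+      // value is truncated by the representation change pass and no explicit
+      // HChange is needed here.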
break;
}
case EXTERNAL_FLOAT_ELEMENTS:
&has_side_effects);
if (has_side_effects) {
if (ast_context()->IsEffect()) {
- AddSimulate(expr->id());
+ AddSimulate(expr->id(), REMOVABLE_SIMULATE);
} else {
Push(load);
- AddSimulate(expr->id());
+ AddSimulate(expr->id(), REMOVABLE_SIMULATE);
Drop(1);
}
}
HInstruction* instr =
new(zone()) HMul(context, value, graph_->GetConstantMinus1());
TypeInfo info = oracle()->UnaryType(expr);
+ Representation rep = ToRepresentation(info);
if (info.IsUninitialized()) {
AddInstruction(new(zone()) HSoftDeoptimize);
current_block()->MarkAsDeoptimizing();
info = TypeInfo::Unknown();
}
- Representation rep = ToRepresentation(info);
- TraceRepresentation(expr->op(), info, instr, rep);
- instr->AssumeRepresentation(rep);
+ HBinaryOperation::cast(instr)->set_observed_input_representation(rep, rep);
return ast_context()->ReturnInstruction(instr, expr->id());
}
: graph_->GetConstantMinus1();
HValue* context = environment()->LookupContext();
HInstruction* instr = new(zone()) HAdd(context, Top(), delta);
- TraceRepresentation(expr->op(), info, instr, rep);
+  // We can't insert a simulate here because it would break deoptimization;
+  // therefore the HAdd must not have side effects, so we must freeze its
+  // representation.
instr->AssumeRepresentation(rep);
+ instr->ClearAllSideEffects();
AddInstruction(instr);
return instr;
}
new(zone()) HStoreContextSlot(context, var->index(), mode, after);
AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId());
+ AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
}
break;
}
load = BuildLoadNamedGeneric(object, name, prop);
}
PushAndAdd(load);
- if (load->HasObservableSideEffects()) AddSimulate(prop->LoadId());
+ if (load->HasObservableSideEffects()) {
+ AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
+ }
after = BuildIncrement(returns_original_input, expr);
input = Pop();
// necessary.
environment()->SetExpressionStackAt(0, after);
if (returns_original_input) environment()->SetExpressionStackAt(1, input);
- if (store->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
+ if (store->HasObservableSideEffects()) {
+ AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
+ }
} else {
// Keyed property.
false, // is_store
&has_side_effects);
Push(load);
- if (has_side_effects) AddSimulate(prop->LoadId());
+ if (has_side_effects) AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
after = BuildIncrement(returns_original_input, expr);
input = Pop();
environment()->SetExpressionStackAt(0, after);
if (returns_original_input) environment()->SetExpressionStackAt(1, input);
ASSERT(has_side_effects); // Stores always have side effects.
- AddSimulate(expr->AssignmentId());
+ AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
}
}
HValue* left,
HValue* right) {
HValue* context = environment()->LookupContext();
- TypeInfo info = oracle()->BinaryType(expr);
- if (info.IsUninitialized()) {
+ TypeInfo left_info, right_info, result_info, combined_info;
+ oracle()->BinaryType(expr, &left_info, &right_info, &result_info);
+ Representation left_rep = ToRepresentation(left_info);
+ Representation right_rep = ToRepresentation(right_info);
+ Representation result_rep = ToRepresentation(result_info);
+ if (left_info.IsUninitialized()) {
+ // Can't have initialized one but not the other.
+ ASSERT(right_info.IsUninitialized());
AddInstruction(new(zone()) HSoftDeoptimize);
current_block()->MarkAsDeoptimizing();
- info = TypeInfo::Unknown();
+ left_info = right_info = TypeInfo::Unknown();
}
HInstruction* instr = NULL;
switch (expr->op()) {
case Token::ADD:
- if (info.IsString()) {
+ if (left_info.IsString() && right_info.IsString()) {
AddInstruction(new(zone()) HCheckNonSmi(left));
AddInstruction(HCheckInstanceType::NewIsString(left, zone()));
AddInstruction(new(zone()) HCheckNonSmi(right));
break;
case Token::BIT_OR: {
HValue* operand, *shift_amount;
- if (info.IsInteger32() &&
+ if (left_info.IsInteger32() && right_info.IsInteger32() &&
MatchRotateRight(left, right, &operand, &shift_amount)) {
instr = new(zone()) HRor(context, operand, shift_amount);
} else {
UNREACHABLE();
}
- // If we hit an uninitialized binary op stub we will get type info
- // for a smi operation. If one of the operands is a constant string
- // do not generate code assuming it is a smi operation.
- if (info.IsSmi() &&
- ((left->IsConstant() && HConstant::cast(left)->handle()->IsString()) ||
- (right->IsConstant() && HConstant::cast(right)->handle()->IsString()))) {
- return instr;
- }
- Representation rep = ToRepresentation(info);
- // We only generate either int32 or generic tagged bitwise operations.
- if (instr->IsBitwiseBinaryOperation()) {
- HBitwiseBinaryOperation::cast(instr)->
- InitializeObservedInputRepresentation(rep);
- if (rep.IsDouble()) rep = Representation::Integer32();
+ if (instr->IsBinaryOperation()) {
+ HBinaryOperation* binop = HBinaryOperation::cast(instr);
+ binop->set_observed_input_representation(left_rep, right_rep);
+ binop->initialize_output_representation(result_rep);
}
- TraceRepresentation(expr->op(), info, instr, rep);
- instr->AssumeRepresentation(rep);
return instr;
}
}
-void HGraphBuilder::TraceRepresentation(Token::Value op,
- TypeInfo info,
- HValue* value,
- Representation rep) {
- if (!FLAG_trace_representation) return;
- // TODO(svenpanne) Under which circumstances are we actually not flexible?
- // At first glance, this looks a bit weird...
- bool flexible = value->CheckFlag(HValue::kFlexibleRepresentation);
- PrintF("Operation %s has type info %s, %schange representation assumption "
- "for %s (ID %d) from %s to %s\n",
- Token::Name(op),
- info.ToString(),
- flexible ? "" : " DO NOT ",
- value->Mnemonic(),
- graph_->GetMaximumValueID(),
- value->representation().Mnemonic(),
- rep.Mnemonic());
-}
-
-
Representation HGraphBuilder::ToRepresentation(TypeInfo info) {
+ if (info.IsUninitialized()) return Representation::None();
if (info.IsSmi()) return Representation::Integer32();
if (info.IsInteger32()) return Representation::Integer32();
if (info.IsDouble()) return Representation::Double();
return ast_context()->ReturnControl(instr, expr->id());
}
- TypeInfo type_info = oracle()->CompareType(expr);
+ TypeInfo left_type, right_type, overall_type_info;
+ oracle()->CompareType(expr, &left_type, &right_type, &overall_type_info);
+ Representation combined_rep = ToRepresentation(overall_type_info);
+ Representation left_rep = ToRepresentation(left_type);
+ Representation right_rep = ToRepresentation(right_type);
// Check if this expression was ever executed according to type feedback.
// Note that for the special typeof/null/undefined cases we get unknown here.
- if (type_info.IsUninitialized()) {
+ if (overall_type_info.IsUninitialized()) {
AddInstruction(new(zone()) HSoftDeoptimize);
current_block()->MarkAsDeoptimizing();
- type_info = TypeInfo::Unknown();
+ overall_type_info = left_type = right_type = TypeInfo::Unknown();
}
CHECK_ALIVE(VisitForValue(expr->left()));
HIn* result = new(zone()) HIn(context, left, right);
result->set_position(expr->position());
return ast_context()->ReturnInstruction(result, expr->id());
- } else if (type_info.IsNonPrimitive()) {
+ } else if (overall_type_info.IsNonPrimitive()) {
switch (op) {
case Token::EQ:
case Token::EQ_STRICT: {
default:
return Bailout("Unsupported non-primitive compare");
}
- } else if (type_info.IsString() && oracle()->IsSymbolCompare(expr) &&
- (op == Token::EQ || op == Token::EQ_STRICT)) {
+ } else if (overall_type_info.IsSymbol() && Token::IsEqualityOp(op)) {
AddInstruction(new(zone()) HCheckNonSmi(left));
AddInstruction(HCheckInstanceType::NewIsSymbol(left, zone()));
AddInstruction(new(zone()) HCheckNonSmi(right));
result->set_position(expr->position());
return ast_context()->ReturnControl(result, expr->id());
} else {
- Representation r = ToRepresentation(type_info);
- if (r.IsTagged()) {
+ if (combined_rep.IsTagged() || combined_rep.IsNone()) {
HCompareGeneric* result =
new(zone()) HCompareGeneric(context, left, right, op);
+ result->set_observed_input_representation(left_rep, right_rep);
result->set_position(expr->position());
return ast_context()->ReturnInstruction(result, expr->id());
} else {
HCompareIDAndBranch* result =
new(zone()) HCompareIDAndBranch(left, right, op);
+ result->set_observed_input_representation(left_rep, right_rep);
result->set_position(expr->position());
- result->SetInputRepresentation(r);
return ast_context()->ReturnControl(result, expr->id());
}
}
HStoreContextSlot* store = new(zone()) HStoreContextSlot(
context, variable->index(), HStoreContextSlot::kNoCheck, value);
AddInstruction(store);
- if (store->HasObservableSideEffects()) AddSimulate(proxy->id());
+ if (store->HasObservableSideEffects()) {
+ AddSimulate(proxy->id(), REMOVABLE_SIMULATE);
+ }
}
break;
case Variable::LOOKUP:
HStoreContextSlot* store = new(zone()) HStoreContextSlot(
context, variable->index(), HStoreContextSlot::kNoCheck, value);
AddInstruction(store);
- if (store->HasObservableSideEffects()) AddSimulate(proxy->id());
+ if (store->HasObservableSideEffects()) {
+ AddSimulate(proxy->id(), REMOVABLE_SIMULATE);
+ }
break;
}
case Variable::LOOKUP:
void Goto(HBasicBlock* block, FunctionState* state = NULL);
int PredecessorIndexOf(HBasicBlock* predecessor) const;
- void AddSimulate(BailoutId ast_id) { AddInstruction(CreateSimulate(ast_id)); }
+ void AddSimulate(BailoutId ast_id,
+ RemovableSimulate removable = FIXED_SIMULATE) {
+ AddInstruction(CreateSimulate(ast_id, removable));
+ }
void AssignCommonDominator(HBasicBlock* other);
void AssignLoopSuccessorDominators();
void RegisterPredecessor(HBasicBlock* pred);
void AddDominatedBlock(HBasicBlock* block);
- HSimulate* CreateSimulate(BailoutId ast_id);
+ HSimulate* CreateSimulate(BailoutId ast_id, RemovableSimulate removable);
HDeoptimize* CreateDeoptimize(HDeoptimize::UseEnvironment has_uses);
int block_id_;
void InitializeInferredTypes();
void InsertTypeConversions();
+ void MergeRemovableSimulates();
void InsertRepresentationChanges();
void MarkDeoptimizeOnUndefined();
void ComputeMinusZeroChecks();
};
+class HInferRepresentation BASE_EMBEDDED {
+ public:
+ explicit HInferRepresentation(HGraph* graph)
+ : graph_(graph),
+ worklist_(8, graph->zone()),
+ in_worklist_(graph->GetMaximumValueID(), graph->zone()) { }
+
+ void Analyze();
+ void AddToWorklist(HValue* current);
+
+ private:
+ Zone* zone() const { return graph_->zone(); }
+
+ HGraph* graph_;
+ ZoneList<HValue*> worklist_;
+ BitVector in_worklist_;
+};
+
+
class HGraphBuilder;
enum ArgumentsAllowedFlag {
// Adding instructions.
HInstruction* AddInstruction(HInstruction* instr);
- void AddSimulate(BailoutId ast_id);
+ void AddSimulate(BailoutId ast_id,
+ RemovableSimulate removable = FIXED_SIMULATE);
// Bailout environment manipulation.
void Push(HValue* value) { environment()->Push(value); }
// to push them as outgoing parameters.
template <class Instruction> HInstruction* PreProcessCall(Instruction* call);
- void TraceRepresentation(Token::Value op,
- TypeInfo info,
- HValue* value,
- Representation rep);
static Representation ToRepresentation(TypeInfo info);
void SetUpScope(Scope* scope);
static void CheckSSE2OperandsAreInt32(MacroAssembler* masm,
Label* non_int32,
Register scratch);
+
+ static void CheckSSE2OperandIsInt32(MacroAssembler* masm,
+ Label* non_int32,
+ XMMRegister operand,
+ Register scratch,
+ XMMRegister xmm_scratch);
};
// Get exponent alone in scratch2.
__ mov(scratch2, scratch);
__ and_(scratch2, HeapNumber::kExponentMask);
+ __ shr(scratch2, HeapNumber::kExponentShift);
+ __ sub(scratch2, Immediate(HeapNumber::kExponentBias));
+ // Load ecx with zero. We use this either for the final shift or
+ // for the answer.
+ __ xor_(ecx, ecx);
+ // If the exponent is above 83, the number contains no significant
+ // bits in the range 0..2^31, so the result is zero.
+ static const uint32_t kResultIsZeroExponent = 83;
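+  // (83 = 31 + 52 mantissa bits: above this, even the least significant
+  // mantissa bit has a value of at least 2^32, so the low 32 bits are zero.)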
+ __ cmp(scratch2, Immediate(kResultIsZeroExponent));
+ __ j(above, &done);
if (use_sse3) {
CpuFeatures::Scope scope(SSE3);
// Check whether the exponent is too big for a 64 bit signed integer.
- static const uint32_t kTooBigExponent =
- (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
+ static const uint32_t kTooBigExponent = 63;
__ cmp(scratch2, Immediate(kTooBigExponent));
__ j(greater_equal, conversion_failure);
// Load x87 register with heap number.
__ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
__ add(esp, Immediate(sizeof(uint64_t))); // Nolint.
} else {
- // Load ecx with zero. We use this either for the final shift or
- // for the answer.
- __ xor_(ecx, ecx);
// Check whether the exponent matches a 32 bit signed int that cannot be
// represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
// exponent is 30 (biased). This is the exponent that we are fastest at and
// also the highest exponent we can handle here.
- const uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+ const uint32_t non_smi_exponent = 30;
__ cmp(scratch2, Immediate(non_smi_exponent));
// If we have a match of the int32-but-not-Smi exponent then skip some
// logic.
{
// Handle a big exponent. The only reason we have this code is that the
// >>> operator has a tendency to generate numbers with an exponent of 31.
- const uint32_t big_non_smi_exponent =
- (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
+ const uint32_t big_non_smi_exponent = 31;
__ cmp(scratch2, Immediate(big_non_smi_exponent));
__ j(not_equal, conversion_failure);
// We have the big exponent, typically from >>>. This means the number is
}
__ bind(&normal_exponent);
- // Exponent word in scratch, exponent part of exponent word in scratch2.
- // Zero in ecx.
- // We know the exponent is smaller than 30 (biased). If it is less than
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
- // it rounds to zero.
- const uint32_t zero_exponent =
- (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
- __ sub(scratch2, Immediate(zero_exponent));
- // ecx already has a Smi zero.
- __ j(less, &done, Label::kNear);
-
- // We have a shifted exponent between 0 and 30 in scratch2.
- __ shr(scratch2, HeapNumber::kExponentShift);
+ // Exponent word in scratch, exponent in scratch2. Zero in ecx.
+ // We know that 0 <= exponent < 30.
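+  // (A negative exponent would have appeared as a large unsigned value and
+  // already branched to &done at the comparison against 83 above.)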
__ mov(ecx, Immediate(30));
__ sub(ecx, scratch2);
__ jmp(&done, Label::kNear);
__ bind(&negative);
__ sub(ecx, scratch2);
- __ bind(&done);
}
+ __ bind(&done);
}
}
+void BinaryOpStub::Initialize() {
+ platform_specific_bit_ = CpuFeatures::IsSupported(SSE3);
+}
+
+
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ pop(ecx); // Save return address.
__ push(edx);
__ push(eax);
// Left and right arguments are now on top.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
__ push(Immediate(Smi::FromInt(MinorKey())));
- __ push(Immediate(Smi::FromInt(op_)));
- __ push(Immediate(Smi::FromInt(operands_type_)));
__ push(ecx); // Push return address.
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
masm->isolate()),
- 5,
+ 3,
1);
}
void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
__ pop(ecx); // Save return address.
// Left and right arguments are already on top of the stack.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
__ push(Immediate(Smi::FromInt(MinorKey())));
- __ push(Immediate(Smi::FromInt(op_)));
- __ push(Immediate(Smi::FromInt(operands_type_)));
__ push(ecx); // Push return address.
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
masm->isolate()),
- 5,
+ 3,
1);
}
-void BinaryOpStub::Generate(MacroAssembler* masm) {
- // Explicitly allow generation of nested stubs. It is safe here because
- // generation code does not use any raw pointers.
- AllowStubCallsScope allow_stub_calls(masm, true);
-
- switch (operands_type_) {
- case BinaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case BinaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case BinaryOpIC::INT32:
- GenerateInt32Stub(masm);
- break;
- case BinaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
- break;
- case BinaryOpIC::ODDBALL:
- GenerateOddballStub(masm);
- break;
- case BinaryOpIC::BOTH_STRING:
- GenerateBothStringStub(masm);
- break;
- case BinaryOpIC::STRING:
- GenerateStringStub(masm);
- break;
- case BinaryOpIC::GENERIC:
- GenerateGeneric(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
- stream->Add("BinaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(operands_type_));
-}
-
-
-void BinaryOpStub::GenerateSmiCode(
+static void BinaryOpStub_GenerateSmiCode(
MacroAssembler* masm,
Label* slow,
- SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
+ BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
+ Token::Value op) {
// 1. Move arguments into edx, eax except for DIV and MOD, which need the
// dividend in eax and edx free for the division. Use eax, ebx for those.
Comment load_comment(masm, "-- Load arguments");
Register left = edx;
Register right = eax;
- if (op_ == Token::DIV || op_ == Token::MOD) {
+ if (op == Token::DIV || op == Token::MOD) {
left = eax;
right = ebx;
__ mov(ebx, eax);
Label not_smis;
Register combined = ecx;
ASSERT(!left.is(combined) && !right.is(combined));
- switch (op_) {
+ switch (op) {
case Token::BIT_OR:
// Perform the operation into eax and smi check the result. Preserve
// eax in case the result is not a smi.
// eax and check the result if necessary.
Comment perform_smi(masm, "-- Perform smi operation");
Label use_fp_on_smis;
- switch (op_) {
+ switch (op) {
case Token::BIT_OR:
// Nothing to do.
break;
}
// 5. Emit return of result in eax. Some operations have registers pushed.
- switch (op_) {
+ switch (op) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
// 6. For some operations emit inline code to perform floating point
// operations on known smis (e.g., if the result of the operation
// overflowed the smi range).
- if (allow_heapnumber_results == NO_HEAPNUMBER_RESULTS) {
+ if (allow_heapnumber_results == BinaryOpStub::NO_HEAPNUMBER_RESULTS) {
__ bind(&use_fp_on_smis);
- switch (op_) {
+ switch (op) {
// Undo the effects of some operations, and some register moves.
case Token::SHL:
// The arguments are saved on the stack, and only used from there.
}
  __ jmp(&not_smis);
} else {
- ASSERT(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS);
- switch (op_) {
+ ASSERT(allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS);
+ switch (op) {
case Token::SHL:
case Token::SHR: {
Comment perform_float(masm, "-- Perform float operation on smis");
// Store the result in the HeapNumber and return.
// It's OK to overwrite the arguments on the stack because we
// are about to return.
- if (op_ == Token::SHR) {
+ if (op == Token::SHR) {
__ mov(Operand(esp, 1 * kPointerSize), left);
__ mov(Operand(esp, 2 * kPointerSize), Immediate(0));
__ fild_d(Operand(esp, 1 * kPointerSize));
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
} else {
- ASSERT_EQ(Token::SHL, op_);
+ ASSERT_EQ(Token::SHL, op);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ cvtsi2sd(xmm0, left);
Comment perform_float(masm, "-- Perform float operation on smis");
__ bind(&use_fp_on_smis);
// Restore arguments to edx, eax.
- switch (op_) {
+ switch (op) {
case Token::ADD:
// Revert right = right + left.
__ sub(right, left);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
FloatingPointHelper::LoadSSE2Smis(masm, ebx);
- switch (op_) {
+ switch (op) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
case Token::SUB: __ subsd(xmm0, xmm1); break;
case Token::MUL: __ mulsd(xmm0, xmm1); break;
__ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
} else { // SSE2 not available, use FPU.
FloatingPointHelper::LoadFloatSmis(masm, ebx);
- switch (op_) {
+ switch (op) {
case Token::ADD: __ faddp(1); break;
case Token::SUB: __ fsubp(1); break;
case Token::MUL: __ fmulp(1); break;
// edx and eax.
Comment done_comment(masm, "-- Enter non-smi code");
  __ bind(&not_smis);
- switch (op_) {
+ switch (op) {
case Token::BIT_OR:
case Token::SHL:
case Token::SAR:
if (result_type_ == BinaryOpIC::UNINITIALIZED ||
result_type_ == BinaryOpIC::SMI) {
- GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, NO_HEAPNUMBER_RESULTS, op_);
} else {
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
}
__ bind(&call_runtime);
switch (op_) {
}
-void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(operands_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // Try to add arguments as strings, otherwise, transition to the generic
- // BinaryOpIC type.
- GenerateAddStrings(masm);
- GenerateTypeTransition(masm);
-}
-
-
void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
Label call_runtime;
- ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+ ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
ASSERT(op_ == Token::ADD);
// If both arguments are strings, call the string add stub.
// Otherwise, do a transition.
}
+static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Label* alloc_failure,
+ OverwriteMode mode);
+
+
// Input:
// edx: left operand (tagged)
// eax: right operand (tagged)
// eax: result (tagged)
void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Label call_runtime;
- ASSERT(operands_type_ == BinaryOpIC::INT32);
+ ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
// Floating point case.
switch (op_) {
Label not_int32;
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
+ // It could be that only SMIs have been seen at either the left
+ // or the right operand. For precise type feedback, patch the IC
+ // again if this changes.
+ // In theory, we would need the same check in the non-SSE2 case,
+ // but since we don't support Crankshaft on such hardware we can
+ // afford not to care about precise type feedback.
+ if (left_type_ == BinaryOpIC::SMI) {
+    __ JumpIfNotSmi(edx, &not_int32);
+ }
+ if (right_type_ == BinaryOpIC::SMI) {
+    __ JumpIfNotSmi(eax, &not_int32);
+ }
  FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
  FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
if (op_ == Token::MOD) {
__ test(ecx, Immediate(1));
  __ j(zero, &not_int32);
}
- GenerateHeapResultAllocation(masm, &call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
__ ret(0);
}
default: UNREACHABLE();
}
Label after_alloc_failure;
- GenerateHeapResultAllocation(masm, &after_alloc_failure);
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, &after_alloc_failure, mode_);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
__ bind(&after_alloc_failure);
Label not_floats;
Label not_int32;
Label non_smi_result;
+ // We do not check the input arguments here, as any value is
+ // unconditionally truncated to an int32 anyway. To get the
+ // right optimized code, int32 type feedback is just right.
+ bool use_sse3 = platform_specific_bit_;
FloatingPointHelper::LoadUnknownsAsIntegers(masm,
- use_sse3_,
+ use_sse3,
                                              &not_floats);
- FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_,
+ FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3,
                                                      &not_int32);
switch (op_) {
case Token::BIT_OR: __ or_(eax, ecx); break;
switch (op_) {
case Token::ADD:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
case Token::SUB:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
case Token::MUL:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
case Token::DIV:
GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
break;
case Token::MOD:
- break;
+ return; // Handled above.
case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
break;
default:
UNREACHABLE();
}
+ GenerateCallRuntime(masm);
}
Label not_floats;
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
+
+ // It could be that only SMIs have been seen at either the left
+ // or the right operand. For precise type feedback, patch the IC
+ // again if this changes.
+ // In theory, we would need the same check in the non-SSE2 case,
+ // but since we don't support Crankshaft on such hardware we can
+ // afford not to care about precise type feedback.
+ if (left_type_ == BinaryOpIC::SMI) {
+    __ JumpIfNotSmi(edx, &not_floats);
+ }
+ if (right_type_ == BinaryOpIC::SMI) {
+    __ JumpIfNotSmi(eax, &not_floats);
+ }
  FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
+ if (left_type_ == BinaryOpIC::INT32) {
+ FloatingPointHelper::CheckSSE2OperandIsInt32(
+        masm, &not_floats, xmm0, ecx, xmm2);
+ }
+ if (right_type_ == BinaryOpIC::INT32) {
+ FloatingPointHelper::CheckSSE2OperandIsInt32(
+        masm, &not_floats, xmm1, ecx, xmm2);
+ }
switch (op_) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE();
}
- GenerateHeapResultAllocation(masm, &call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
__ ret(0);
} else { // SSE2 not available, use FPU.
default: UNREACHABLE();
}
Label after_alloc_failure;
- GenerateHeapResultAllocation(masm, &after_alloc_failure);
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, &after_alloc_failure, mode_);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
__ bind(&after_alloc_failure);
GenerateRegisterArgsPush(masm);
Label not_floats;
Label non_smi_result;
+ // We do not check the input arguments here, as any value is
+ // unconditionally truncated to an int32 anyway. To get the
+ // right optimized code, int32 type feedback is just right.
+ bool use_sse3 = platform_specific_bit_;
FloatingPointHelper::LoadUnknownsAsIntegers(masm,
- use_sse3_,
+ use_sse3,
                                              &not_floats);
switch (op_) {
case Token::BIT_OR: __ or_(eax, ecx); break;
switch (op_) {
case Token::ADD:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
case Token::SUB:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
case Token::MUL:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
case Token::DIV:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
case Token::MOD:
GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
break;
case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
break;
default:
UNREACHABLE();
}
+ GenerateCallRuntime(masm);
}
UNREACHABLE();
}
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
// Floating point case.
switch (op_) {
case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE();
}
- GenerateHeapResultAllocation(masm, &call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
__ ret(0);
} else { // SSE2 not available, use FPU.
default: UNREACHABLE();
}
Label after_alloc_failure;
- GenerateHeapResultAllocation(masm, &after_alloc_failure);
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, &after_alloc_failure, mode_);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
__ bind(&after_alloc_failure);
case Token::SHL:
case Token::SHR: {
Label non_smi_result;
+ bool use_sse3 = platform_specific_bit_;
FloatingPointHelper::LoadUnknownsAsIntegers(masm,
- use_sse3_,
+ use_sse3,
&call_runtime);
switch (op_) {
case Token::BIT_OR: __ or_(eax, ecx); break;
// result.
__ bind(&call_runtime);
switch (op_) {
- case Token::ADD: {
+ case Token::ADD:
GenerateAddStrings(masm);
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- }
+ // Fall through.
case Token::SUB:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
case Token::MUL:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
case Token::DIV:
GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
break;
case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
break;
default:
UNREACHABLE();
}
+ GenerateCallRuntime(masm);
}
}
-void BinaryOpStub::GenerateHeapResultAllocation(
- MacroAssembler* masm,
- Label* alloc_failure) {
+static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Label* alloc_failure,
+ OverwriteMode mode) {
Label skip_allocation;
- OverwriteMode mode = mode_;
switch (mode) {
case OVERWRITE_LEFT: {
// If the argument in edx is already an object, we skip the
void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm,
Label* non_int32,
Register scratch) {
- __ cvttsd2si(scratch, Operand(xmm0));
- __ cvtsi2sd(xmm2, scratch);
- __ ucomisd(xmm0, xmm2);
- __ j(not_zero, non_int32);
- __ j(carry, non_int32);
- __ cvttsd2si(scratch, Operand(xmm1));
- __ cvtsi2sd(xmm2, scratch);
- __ ucomisd(xmm1, xmm2);
- __ j(not_zero, non_int32);
- __ j(carry, non_int32);
+ CheckSSE2OperandIsInt32(masm, non_int32, xmm0, scratch, xmm2);
+ CheckSSE2OperandIsInt32(masm, non_int32, xmm1, scratch, xmm2);
+}
+
+
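+// Checks whether the double in |operand| represents an exact int32 value by
+// converting it to int32 and back; jumps to |non_int32| if it does not.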
+void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm,
+ Label* non_int32,
+ XMMRegister operand,
+ Register scratch,
+ XMMRegister xmm_scratch) {
+ __ cvttsd2si(scratch, Operand(operand));
+ __ cvtsi2sd(xmm_scratch, scratch);
+ __ pcmpeqd(xmm_scratch, operand);
+ __ movmskpd(scratch, xmm_scratch);
+ __ test(scratch, Immediate(1));
+ __ j(zero, non_int32);
}
return (cc == greater || cc == greater_equal) ? LESS : GREATER;
}
-void CompareStub::Generate(MacroAssembler* masm) {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+static void CheckInputType(MacroAssembler* masm,
+ Register input,
+ CompareIC::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareIC::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareIC::HEAP_NUMBER) {
+ __ JumpIfSmi(input, &ok);
+ __ cmp(FieldOperand(input, HeapObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->heap_number_map()));
+ __ j(not_equal, fail);
+ }
+ // We could be strict about symbol/string here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ bind(&ok);
+}
+
+
+static void BranchIfNonSymbol(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch) {
+ __ JumpIfSmi(object, label);
+ __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
+ __ cmp(scratch, kSymbolTag | kStringTag);
+ __ j(not_equal, label);
+}
+
+
+void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
Label check_unequal_objects;
+ Condition cc = GetCondition();
- // Compare two smis if required.
- if (include_smi_compare_) {
- Label non_smi, smi_done;
- __ mov(ecx, edx);
- __ or_(ecx, eax);
- __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
- __ sub(edx, eax); // Return on the result of the subtraction.
- __ j(no_overflow, &smi_done, Label::kNear);
- __ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
- __ bind(&smi_done);
- __ mov(eax, edx);
- __ ret(0);
- __ bind(&non_smi);
- } else if (FLAG_debug_code) {
- __ mov(ecx, edx);
- __ or_(ecx, eax);
- __ test(ecx, Immediate(kSmiTagMask));
- __ Assert(not_zero, "Unexpected smi operands.");
- }
+ Label miss;
+ CheckInputType(masm, edx, left_, &miss);
+ CheckInputType(masm, eax, right_, &miss);
+
+ // Compare two smis.
+ Label non_smi, smi_done;
+ __ mov(ecx, edx);
+ __ or_(ecx, eax);
+ __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
+ __ sub(edx, eax); // Return on the result of the subtraction.
+ __ j(no_overflow, &smi_done, Label::kNear);
+ __ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
+ __ bind(&smi_done);
+ __ mov(eax, edx);
+ __ ret(0);
+ __ bind(&non_smi);
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
__ cmp(eax, edx);
  __ j(not_equal, &not_identical);
- if (cc_ != equal) {
+ if (cc != equal) {
// Check for undefined. undefined OP undefined is false even though
// undefined == undefined.
Label check_for_nan;
__ cmp(edx, masm->isolate()->factory()->undefined_value());
__ j(not_equal, &check_for_nan, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+ __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
__ ret(0);
__ bind(&check_for_nan);
}
// Test for NaN. Sadly, we can't just compare to factory->nan_value(),
// so we do the second best thing - test it ourselves.
- // Note: if cc_ != equal, never_nan_nan_ is not used.
- if (never_nan_nan_ && (cc_ == equal)) {
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ Label heap_number;
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->heap_number_map()));
+ __ j(equal, &heap_number, Label::kNear);
+ if (cc != equal) {
+ // Call runtime on identical JSObjects. Otherwise return equal.
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+    __ j(above_equal, &not_identical);
+ }
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if
+ // it's not NaN.
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // We only accept QNaNs, which have bit 51 set.
+ // Read top bits of double representation (second word of value).
+
+ // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
+ // all bits in the mask are set. We only need to check the word
+ // that contains the exponent and high bit of the mantissa.
+ STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
+ __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
+ __ Set(eax, Immediate(0));
+ // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
+ // bits.
+ __ add(edx, edx);
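+  // (Adding edx to itself shifts it left by one, so the sign bit drops out
+  // and cannot affect the unsigned comparison below.)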
+ __ cmp(edx, kQuietNaNHighBitsMask << 1);
+ if (cc == equal) {
+ STATIC_ASSERT(EQUAL != 1);
+ __ setcc(above_equal, eax);
__ ret(0);
} else {
- Label heap_number;
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->heap_number_map()));
- __ j(equal, &heap_number, Label::kNear);
- if (cc_ != equal) {
- // Call runtime on identical JSObjects. Otherwise return equal.
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
-      __ j(above_equal, &not_identical);
- }
+ Label nan;
+ __ j(above_equal, &nan, Label::kNear);
__ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
-
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if
- // it's not NaN.
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // We only accept QNaNs, which have bit 51 set.
- // Read top bits of double representation (second word of value).
-
- // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
- // all bits in the mask are set. We only need to check the word
- // that contains the exponent and high bit of the mantissa.
- STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
- __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ Set(eax, Immediate(0));
- // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
- // bits.
- __ add(edx, edx);
- __ cmp(edx, kQuietNaNHighBitsMask << 1);
- if (cc_ == equal) {
- STATIC_ASSERT(EQUAL != 1);
- __ setcc(above_equal, eax);
- __ ret(0);
- } else {
- Label nan;
- __ j(above_equal, &nan, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
- __ bind(&nan);
- __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
- __ ret(0);
- }
+ __ bind(&nan);
+ __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
+ __ ret(0);
}
  __ bind(&not_identical);
// Strict equality can quickly decide whether objects are equal.
// Non-strict object equality is slower, so it is handled later in the stub.
- if (cc_ == equal && strict_) {
+ if (cc == equal && strict()) {
Label slow; // Fallthrough label.
Label not_smis;
// If we're doing a strict equality comparison, we don't have to do
}
// Generate the number comparison code.
- if (include_number_compare_) {
- Label non_number_comparison;
- Label unordered;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- CpuFeatures::Scope use_cmov(CMOV);
-
- FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
- __ ucomisd(xmm0, xmm1);
+ Label non_number_comparison;
+ Label unordered;
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ CpuFeatures::Scope use_cmov(CMOV);
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ mov(eax, 0); // equal
- __ mov(ecx, Immediate(Smi::FromInt(1)));
- __ cmov(above, eax, ecx);
- __ mov(ecx, Immediate(Smi::FromInt(-1)));
- __ cmov(below, eax, ecx);
- __ ret(0);
- } else {
- FloatingPointHelper::CheckFloatOperands(
- masm, &non_number_comparison, ebx);
- FloatingPointHelper::LoadFloatOperand(masm, eax);
- FloatingPointHelper::LoadFloatOperand(masm, edx);
- __ FCmp();
+ FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
+ __ ucomisd(xmm0, xmm1);
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered, Label::kNear);
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ mov(eax, 0); // equal
+ __ mov(ecx, Immediate(Smi::FromInt(1)));
+ __ cmov(above, eax, ecx);
+ __ mov(ecx, Immediate(Smi::FromInt(-1)));
+ __ cmov(below, eax, ecx);
+ __ ret(0);
+ } else {
+ FloatingPointHelper::CheckFloatOperands(
+ masm, &non_number_comparison, ebx);
+ FloatingPointHelper::LoadFloatOperand(masm, eax);
+ FloatingPointHelper::LoadFloatOperand(masm, edx);
+ __ FCmp();
- Label below_label, above_label;
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ j(below, &below_label, Label::kNear);
- __ j(above, &above_label, Label::kNear);
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered, Label::kNear);
- __ Set(eax, Immediate(0));
- __ ret(0);
+ Label below_label, above_label;
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ j(below, &below_label, Label::kNear);
+ __ j(above, &above_label, Label::kNear);
- __ bind(&below_label);
- __ mov(eax, Immediate(Smi::FromInt(-1)));
- __ ret(0);
+ __ Set(eax, Immediate(0));
+ __ ret(0);
- __ bind(&above_label);
- __ mov(eax, Immediate(Smi::FromInt(1)));
- __ ret(0);
- }
+ __ bind(&below_label);
+ __ mov(eax, Immediate(Smi::FromInt(-1)));
+ __ ret(0);
- // If one of the numbers was NaN, then the result is always false.
- // The cc is never not-equal.
- __ bind(&unordered);
- ASSERT(cc_ != not_equal);
- if (cc_ == less || cc_ == less_equal) {
- __ mov(eax, Immediate(Smi::FromInt(1)));
- } else {
- __ mov(eax, Immediate(Smi::FromInt(-1)));
- }
+ __ bind(&above_label);
+ __ mov(eax, Immediate(Smi::FromInt(1)));
__ ret(0);
+ }
- // The number comparison code did not provide a valid result.
- __ bind(&non_number_comparison);
+ // If one of the numbers was NaN, then the result is always false.
+ // The cc is never not-equal.
+ __ bind(&unordered);
+ ASSERT(cc != not_equal);
+ if (cc == less || cc == less_equal) {
+ __ mov(eax, Immediate(Smi::FromInt(1)));
+ } else {
+ __ mov(eax, Immediate(Smi::FromInt(-1)));
}
+ __ ret(0);
+
+ // The number comparison code did not provide a valid result.
+ __ bind(&non_number_comparison);
// Fast negative check for symbol-to-symbol equality.
Label check_for_strings;
- if (cc_ == equal) {
+ if (cc == equal) {
BranchIfNonSymbol(masm, &check_for_strings, eax, ecx);
BranchIfNonSymbol(masm, &check_for_strings, edx, ecx);
&check_unequal_objects);
// Inline comparison of ASCII strings.
- if (cc_ == equal) {
+ if (cc == equal) {
StringCompareStub::GenerateFlatAsciiStringEquals(masm,
edx,
eax,
#endif
__ bind(&check_unequal_objects);
- if (cc_ == equal && !strict_) {
+ if (cc == equal && !strict()) {
// Non-strict equality. Objects are unequal if
// they are both JSObjects and not undetectable,
// and their pointers are different.
// Figure out which native to call and setup the arguments.
Builtins::JavaScript builtin;
- if (cc_ == equal) {
- builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc == equal) {
+ builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
builtin = Builtins::COMPARE;
- __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+ __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
}
// Restore return address on the stack.
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ InvokeBuiltin(builtin, JUMP_FUNCTION);
-}
-
-void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch) {
- __ JumpIfSmi(object, label);
- __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
- __ cmp(scratch, kSymbolTag | kStringTag);
- __ j(not_equal, label);
+ __ bind(&miss);
+ GenerateMiss(masm);
}
Register InstanceofStub::right() { return edx; }
-int CompareStub::MinorKey() {
- // Encode the three parameters in a unique 16 bit value. To avoid duplicate
- // stubs the never NaN NaN condition is only taken into account if the
- // condition is equals.
- ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
- return ConditionField::encode(static_cast<unsigned>(cc_))
- | RegisterField::encode(false) // lhs_ and rhs_ are not used
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
- | IncludeNumberCompareField::encode(include_number_compare_)
- | IncludeSmiCompareField::encode(include_smi_compare_);
-}
-
-
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-void CompareStub::PrintName(StringStream* stream) {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
- const char* cc_name;
- switch (cc_) {
- case less: cc_name = "LT"; break;
- case greater: cc_name = "GT"; break;
- case less_equal: cc_name = "LE"; break;
- case greater_equal: cc_name = "GE"; break;
- case equal: cc_name = "EQ"; break;
- case not_equal: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
- bool is_equality = cc_ == equal || cc_ == not_equal;
- stream->Add("CompareStub_%s", cc_name);
- if (strict_ && is_equality) stream->Add("_STRICT");
- if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
- if (!include_number_compare_) stream->Add("_NO_NUMBER");
- if (!include_smi_compare_) stream->Add("_NO_SMI");
-}
-
-
// -------------------------------------------------------------------------
// StringCharCodeAtGenerator
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMIS);
+ ASSERT(state_ == CompareIC::SMI);
Label miss;
__ mov(ecx, edx);
__ or_(ecx, eax);
void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+ ASSERT(state_ == CompareIC::HEAP_NUMBER);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
- __ mov(ecx, edx);
- __ and_(ecx, eax);
- __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
- __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
- __ j(not_equal, &maybe_undefined1, Label::kNear);
- __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
- __ j(not_equal, &maybe_undefined2, Label::kNear);
+ if (left_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(edx, &miss);
+ }
+ if (right_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(eax, &miss);
+ }
// Inlining the double comparison and falling back to the general compare
- // stub if NaN is involved or SS2 or CMOV is unsupported.
+ // stub if NaN is involved or SSE2 or CMOV is unsupported.
if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
CpuFeatures::Scope scope1(SSE2);
CpuFeatures::Scope scope2(CMOV);
- // Load left and right operand
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ // Load left and right operand.
+ Label done, left, left_smi, right_smi;
+ __ JumpIfSmi(eax, &right_smi, Label::kNear);
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ masm->isolate()->factory()->heap_number_map());
+ __ j(not_equal, &maybe_undefined1, Label::kNear);
__ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ jmp(&left, Label::kNear);
+ __ bind(&right_smi);
+ __ mov(ecx, eax); // Can't clobber eax because we can still jump away.
+ __ SmiUntag(ecx);
+ __ cvtsi2sd(xmm1, ecx);
+
+ __ bind(&left);
+ __ JumpIfSmi(edx, &left_smi, Label::kNear);
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+ masm->isolate()->factory()->heap_number_map());
+ __ j(not_equal, &maybe_undefined2, Label::kNear);
+ __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ __ jmp(&done);
+ __ bind(&left_smi);
+ __ mov(ecx, edx); // Can't clobber edx because we can still jump away.
+ __ SmiUntag(ecx);
+ __ cvtsi2sd(xmm0, ecx);
- // Compare operands
+ __ bind(&done);
+ // Compare operands.
__ ucomisd(xmm0, xmm1);
// Don't base result on EFLAGS when a NaN is involved.
__ mov(ecx, Immediate(Smi::FromInt(-1)));
__ cmov(below, eax, ecx);
__ ret(0);
+ } else {
+ __ mov(ecx, edx);
+ __ and_(ecx, eax);
+ __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
+
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ masm->isolate()->factory()->heap_number_map());
+ __ j(not_equal, &maybe_undefined1, Label::kNear);
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+ masm->isolate()->factory()->heap_number_map());
+ __ j(not_equal, &maybe_undefined2, Label::kNear);
}
__ bind(&unordered);
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
__ bind(&generic_stub);
+ ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ CompareIC::GENERIC);
__ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
__ cmp(eax, Immediate(masm->isolate()->factory()->undefined_value()));
__ j(not_equal, &miss);
+ __ JumpIfSmi(edx, &unordered);
__ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
__ j(not_equal, &maybe_undefined2, Label::kNear);
__ jmp(&unordered);
void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SYMBOLS);
+ ASSERT(state_ == CompareIC::SYMBOL);
ASSERT(GetCondition() == equal);
// Registers containing left and right operands respectively.
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRINGS);
+ ASSERT(state_ == CompareIC::STRING);
Label miss;
bool equality = Token::IsEqualityOp(op_);
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECTS);
+ ASSERT(state_ == CompareIC::OBJECT);
Label miss;
__ mov(ecx, edx);
__ and_(ecx, eax);
};
-class BinaryOpStub: public CodeStub {
- public:
- BinaryOpStub(Token::Value op, OverwriteMode mode)
- : op_(op),
- mode_(mode),
- operands_type_(BinaryOpIC::UNINITIALIZED),
- result_type_(BinaryOpIC::UNINITIALIZED) {
- use_sse3_ = CpuFeatures::IsSupported(SSE3);
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- BinaryOpStub(
- int key,
- BinaryOpIC::TypeInfo operands_type,
- BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- use_sse3_(SSE3Bits::decode(key)),
- operands_type_(operands_type),
- result_type_(result_type) { }
-
- private:
- enum SmiCodeGenerateHeapNumberResults {
- ALLOW_HEAPNUMBER_RESULTS,
- NO_HEAPNUMBER_RESULTS
- };
-
- Token::Value op_;
- OverwriteMode mode_;
- bool use_sse3_;
-
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo operands_type_;
- BinaryOpIC::TypeInfo result_type_;
-
- virtual void PrintName(StringStream* stream);
-
- // Minor key encoding in 16 bits RRRTTTSOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class SSE3Bits: public BitField<bool, 9, 1> {};
- class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
- class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
-
- Major MajorKey() { return BinaryOp; }
- int MinorKey() {
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | SSE3Bits::encode(use_sse3_)
- | OperandTypeInfoBits::encode(operands_type_)
- | ResultTypeInfoBits::encode(result_type_);
- }
-
- void Generate(MacroAssembler* masm);
- void GenerateGeneric(MacroAssembler* masm);
- void GenerateSmiCode(MacroAssembler* masm,
- Label* slow,
- SmiCodeGenerateHeapNumberResults heapnumber_results);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateUninitializedStub(MacroAssembler* masm);
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateInt32Stub(MacroAssembler* masm);
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateOddballStub(MacroAssembler* masm);
- void GenerateStringStub(MacroAssembler* masm);
- void GenerateBothStringStub(MacroAssembler* masm);
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateAddStrings(MacroAssembler* masm);
-
- void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
- void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(operands_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_binary_op_type(operands_type_);
- code->set_binary_op_result_type(result_type_);
- }
-
- friend class CodeGenerator;
-};
-
-
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
default: {
VisitForAccumulatorValue(expr->right());
- Condition cc = no_condition;
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- cc = equal;
- break;
- case Token::LT:
- cc = less;
- break;
- case Token::GT:
- cc = greater;
- break;
- case Token::LTE:
- cc = less_equal;
- break;
- case Token::GTE:
- cc = greater_equal;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
+ Condition cc = CompareIC::ComputeCondition(op);
__ pop(edx);
bool inline_smi_code = ShouldInlineSmiCase(op);
}
-static bool HasInlinedSmiCode(Address address) {
+bool CompareIC::HasInlinedSmiCode(Address address) {
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
}
-void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
- HandleScope scope;
- Handle<Code> rewritten;
- State previous_state = GetState();
-
- State state = TargetState(previous_state, HasInlinedSmiCode(address()), x, y);
- if (state == GENERIC) {
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
- rewritten = stub.GetCode();
- } else {
- ICCompareStub stub(op_, state);
- if (state == KNOWN_OBJECTS) {
- stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
- }
- rewritten = stub.GetCode();
- }
- set_target(*rewritten);
-
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[CompareIC (%s->%s)#%s]\n",
- GetStateName(previous_state),
- GetStateName(state),
- Token::Name(op_));
- }
-#endif
-
- // Activate inlined smi code.
- if (previous_state == UNINITIALIZED) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
- }
-}
-
-
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// The address of the instruction following the call.
Address test_instruction_address =
}
if (emit_instructions) {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+ if (FLAG_code_comments) {
+ HValue* hydrogen = instr->hydrogen_value();
+ if (hydrogen != NULL) {
+ if (hydrogen->IsChange()) {
+ HValue* changed_value = HChange::cast(hydrogen)->value();
+ int use_id = 0;
+ const char* use_mnemo = "dead";
+ if (hydrogen->UseCount() >= 1) {
+ HValue* use_value = hydrogen->uses().value();
+ use_id = use_value->id();
+ use_mnemo = use_value->Mnemonic();
+ }
+ Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
+ current_instruction_, instr->Mnemonic(),
+ changed_value->id(), changed_value->Mnemonic(),
+ use_id, use_mnemo);
+ } else {
+ Comment(";;; @%d: %s. <#%d>", current_instruction_,
+ instr->Mnemonic(), hydrogen->id());
+ }
+ } else {
+ Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+ }
+ }
instr->CompileToNative(this);
}
}
// Check for undefined. Undefined is converted to zero for truncating
// conversions.
__ cmp(input_reg, factory()->undefined_value());
+ __ RecordComment("Deferred TaggedToI: cannot truncate");
DeoptimizeIf(not_equal, instr->environment());
__ mov(input_reg, 0);
__ jmp(&done, Label::kNear);
__ j(less, &convert, Label::kNear);
// Pop FPU stack before deoptimizing.
__ fstp(0);
+ __ RecordComment("Deferred TaggedToI: exponent too big");
DeoptimizeIf(no_condition, instr->environment());
// Reserve space for 64 bit answer.
}
} else {
// Deoptimize if we don't have a heap number.
+ __ RecordComment("Deferred TaggedToI: not a heap number");
DeoptimizeIf(not_equal, instr->environment());
XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
__ cvttsd2si(input_reg, Operand(xmm0));
__ cvtsi2sd(xmm_temp, Operand(input_reg));
__ ucomisd(xmm0, xmm_temp);
+ __ RecordComment("Deferred TaggedToI: lost precision");
DeoptimizeIf(not_equal, instr->environment());
+ __ RecordComment("Deferred TaggedToI: NaN");
DeoptimizeIf(parity_even, instr->environment()); // NaN.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ test(input_reg, Operand(input_reg));
__ j(not_zero, &done);
__ movmskpd(input_reg, xmm0);
__ and_(input_reg, 1);
+ __ RecordComment("Deferred TaggedToI: minus zero");
DeoptimizeIf(not_zero, instr->environment());
}
}
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
HCompareIDAndBranch* instr) {
- Representation r = instr->GetInputRepresentation();
+ Representation r = instr->representation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
env->set_ast_id(instr->ast_id());
env->Drop(instr->pop_count());
- for (int i = 0; i < instr->values()->length(); ++i) {
+ for (int i = instr->values()->length() - 1; i >= 0; --i) {
HValue* value = instr->values()->at(i);
if (instr->HasAssignedIndexAt(i)) {
env->Bind(instr->GetAssignedIndexAt(i), value);
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
+ return hydrogen()->representation().IsDouble();
}
virtual void PrintDataTo(StringStream* stream);
void CompareIC::Clear(Address address, Code* target) {
- // Only clear ICCompareStubs, we currently cannot clear generic CompareStubs.
- if (target->major_key() != CodeStub::CompareIC) return;
+ ASSERT(target->major_key() == CodeStub::CompareIC);
+ CompareIC::State handler_state;
+ Token::Value op;
+ ICCompareStub::DecodeMinorKey(target->stub_info(), NULL, NULL,
+ &handler_state, &op);
// Only clear CompareICs that can retain objects.
- if (target->compare_state() != KNOWN_OBJECTS) return;
- Token::Value op = CompareIC::ComputeOperation(target);
+ if (handler_state != KNOWN_OBJECTS) return;
SetTargetAtAddress(address, GetRawUninitialized(op));
PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK);
}
switch (type_info) {
case UNINITIALIZED: return "Uninitialized";
case SMI: return "SMI";
- case INT32: return "Int32s";
- case HEAP_NUMBER: return "HeapNumbers";
+ case INT32: return "Int32";
+ case HEAP_NUMBER: return "HeapNumber";
case ODDBALL: return "Oddball";
- case BOTH_STRING: return "BothStrings";
- case STRING: return "Strings";
+ case STRING: return "String";
case GENERIC: return "Generic";
default: return "Invalid";
}
case INT32:
case HEAP_NUMBER:
case ODDBALL:
- case BOTH_STRING:
case STRING:
return MONOMORPHIC;
case GENERIC:
}
-BinaryOpIC::TypeInfo BinaryOpIC::JoinTypes(BinaryOpIC::TypeInfo x,
- BinaryOpIC::TypeInfo y) {
- if (x == UNINITIALIZED) return y;
- if (y == UNINITIALIZED) return x;
- if (x == y) return x;
- if (x == BOTH_STRING && y == STRING) return STRING;
- if (x == STRING && y == BOTH_STRING) return STRING;
- if (x == STRING || x == BOTH_STRING || y == STRING || y == BOTH_STRING) {
- return GENERIC;
- }
- if (x > y) return x;
- return y;
-}
-
-
-BinaryOpIC::TypeInfo BinaryOpIC::GetTypeInfo(Handle<Object> left,
- Handle<Object> right) {
- ::v8::internal::TypeInfo left_type =
- ::v8::internal::TypeInfo::TypeFromValue(left);
- ::v8::internal::TypeInfo right_type =
- ::v8::internal::TypeInfo::TypeFromValue(right);
-
- if (left_type.IsSmi() && right_type.IsSmi()) {
- return SMI;
- }
-
- if (left_type.IsInteger32() && right_type.IsInteger32()) {
- // Platforms with 32-bit Smis have no distinct INT32 type.
- if (kSmiValueSize == 32) return SMI;
- return INT32;
- }
-
- if (left_type.IsNumber() && right_type.IsNumber()) {
- return HEAP_NUMBER;
- }
-
- // Patching for fast string ADD makes sense even if only one of the
- // arguments is a string.
- if (left_type.IsString()) {
- return right_type.IsString() ? BOTH_STRING : STRING;
- } else if (right_type.IsString()) {
- return STRING;
- }
-
- // Check for oddball objects.
- if (left->IsUndefined() && right->IsNumber()) return ODDBALL;
- if (left->IsNumber() && right->IsUndefined()) return ODDBALL;
-
- return GENERIC;
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, UnaryOp_Patch) {
ASSERT(args.length() == 4);
return *result;
}
+
+static BinaryOpIC::TypeInfo TypeInfoFromValue(Handle<Object> value,
+ Token::Value op) {
+ ::v8::internal::TypeInfo type =
+ ::v8::internal::TypeInfo::TypeFromValue(value);
+ if (type.IsSmi()) return BinaryOpIC::SMI;
+ if (type.IsInteger32()) {
+ if (kSmiValueSize == 32) return BinaryOpIC::SMI;
+ return BinaryOpIC::INT32;
+ }
+ if (type.IsNumber()) return BinaryOpIC::HEAP_NUMBER;
+ if (type.IsString()) return BinaryOpIC::STRING;
+ if (value->IsUndefined()) {
+ if (op == Token::BIT_AND ||
+ op == Token::BIT_OR ||
+ op == Token::BIT_XOR ||
+ op == Token::SAR ||
+ op == Token::SHL ||
+ op == Token::SHR) {
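+      // These operators truncate undefined to zero, so int32 feedback
+      // is sufficient for them.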
+ if (kSmiValueSize == 32) return BinaryOpIC::SMI;
+ return BinaryOpIC::INT32;
+ }
+ return BinaryOpIC::ODDBALL;
+ }
+ return BinaryOpIC::GENERIC;
+}
+
+
+static BinaryOpIC::TypeInfo InputState(BinaryOpIC::TypeInfo old_type,
+ Handle<Object> value,
+ Token::Value op) {
+ BinaryOpIC::TypeInfo new_type = TypeInfoFromValue(value, op);
+ if (old_type == BinaryOpIC::STRING) {
+ if (new_type == BinaryOpIC::STRING) return new_type;
+ return BinaryOpIC::GENERIC;
+ }
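+  // The remaining TypeInfo values are ordered from more specific to more
+  // general, so the larger of the two is the combined input state.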
+ return Max(old_type, new_type);
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
- ASSERT(args.length() == 5);
+ ASSERT(args.length() == 3);
HandleScope scope(isolate);
Handle<Object> left = args.at<Object>(0);
Handle<Object> right = args.at<Object>(1);
int key = args.smi_at(2);
- Token::Value op = static_cast<Token::Value>(args.smi_at(3));
- BinaryOpIC::TypeInfo previous_type =
- static_cast<BinaryOpIC::TypeInfo>(args.smi_at(4));
+ Token::Value op = BinaryOpStub::decode_op_from_minor_key(key);
+ BinaryOpIC::TypeInfo previous_left, previous_right, unused_previous_result;
+ BinaryOpStub::decode_types_from_minor_key(
+ key, &previous_left, &previous_right, &unused_previous_result);
- BinaryOpIC::TypeInfo type = BinaryOpIC::GetTypeInfo(left, right);
- type = BinaryOpIC::JoinTypes(type, previous_type);
+ BinaryOpIC::TypeInfo new_left = InputState(previous_left, left, op);
+ BinaryOpIC::TypeInfo new_right = InputState(previous_right, right, op);
BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED;
- if ((type == BinaryOpIC::STRING || type == BinaryOpIC::BOTH_STRING) &&
+
+ // STRING is only used for ADD operations.
+ if ((new_left == BinaryOpIC::STRING || new_right == BinaryOpIC::STRING) &&
op != Token::ADD) {
- type = BinaryOpIC::GENERIC;
+ new_left = new_right = BinaryOpIC::GENERIC;
}
- if (type == BinaryOpIC::SMI && previous_type == BinaryOpIC::SMI) {
+
+ BinaryOpIC::TypeInfo new_overall = Max(new_left, new_right);
+ BinaryOpIC::TypeInfo previous_overall = Max(previous_left, previous_right);
+
+ if (new_overall == BinaryOpIC::SMI && previous_overall == BinaryOpIC::SMI) {
if (op == Token::DIV ||
op == Token::MUL ||
op == Token::SHR ||
result_type = BinaryOpIC::INT32;
}
}
- if (type == BinaryOpIC::INT32 && previous_type == BinaryOpIC::INT32) {
- // We must be here because an operation on two INT32 types overflowed.
- result_type = BinaryOpIC::HEAP_NUMBER;
+ if (new_overall == BinaryOpIC::INT32 &&
+ previous_overall == BinaryOpIC::INT32) {
+ if (new_left == previous_left && new_right == previous_right) {
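+      // The int32 feedback is unchanged, so the operation itself must have
+      // overflowed; allow a heap number result.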
+ result_type = BinaryOpIC::HEAP_NUMBER;
+ }
}
- BinaryOpStub stub(key, type, result_type);
+ BinaryOpStub stub(key, new_left, new_right, result_type);
Handle<Code> code = stub.GetCode();
if (!code.is_null()) {
+#ifdef DEBUG
if (FLAG_trace_ic) {
- PrintF("[BinaryOpIC (%s->(%s->%s))#%s]\n",
- BinaryOpIC::GetName(previous_type),
- BinaryOpIC::GetName(type),
+ PrintF("[BinaryOpIC in ");
+ JavaScriptFrame::PrintTop(stdout, false, true);
+ PrintF(" ((%s+%s)->((%s+%s)->%s))#%s @ %p]\n",
+ BinaryOpIC::GetName(previous_left),
+ BinaryOpIC::GetName(previous_right),
+ BinaryOpIC::GetName(new_left),
+ BinaryOpIC::GetName(new_right),
BinaryOpIC::GetName(result_type),
- Token::Name(op));
+ Token::Name(op),
+ static_cast<void*>(*code));
}
+#endif
BinaryOpIC ic(isolate);
ic.patch(*code);
// Activate inlined smi code.
- if (previous_type == BinaryOpIC::UNINITIALIZED) {
+ if (previous_overall == BinaryOpIC::UNINITIALIZED) {
PatchInlinedSmiCode(ic.address(), ENABLE_INLINED_SMI_CHECK);
}
}
Code* CompareIC::GetRawUninitialized(Token::Value op) {
- ICCompareStub stub(op, UNINITIALIZED);
+ ICCompareStub stub(op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED);
Code* code = NULL;
CHECK(stub.FindCodeInCache(&code));
return code;
Handle<Code> CompareIC::GetUninitialized(Token::Value op) {
- ICCompareStub stub(op, UNINITIALIZED);
+ ICCompareStub stub(op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED);
return stub.GetCode();
}
-CompareIC::State CompareIC::ComputeState(Code* target) {
- int key = target->major_key();
- if (key == CodeStub::Compare) return GENERIC;
- ASSERT(key == CodeStub::CompareIC);
- return static_cast<State>(target->compare_state());
-}
-
-
-Token::Value CompareIC::ComputeOperation(Code* target) {
- ASSERT(target->major_key() == CodeStub::CompareIC);
- return static_cast<Token::Value>(
- target->compare_operation() + Token::EQ);
-}
-
-
const char* CompareIC::GetStateName(State state) {
switch (state) {
case UNINITIALIZED: return "UNINITIALIZED";
- case SMIS: return "SMIS";
- case HEAP_NUMBERS: return "HEAP_NUMBERS";
- case OBJECTS: return "OBJECTS";
+ case SMI: return "SMI";
+ case HEAP_NUMBER: return "HEAP_NUMBER";
+ case OBJECT: return "OBJECTS";
case KNOWN_OBJECTS: return "KNOWN_OBJECTS";
- case SYMBOLS: return "SYMBOLS";
- case STRINGS: return "STRINGS";
+ case SYMBOL: return "SYMBOL";
+ case STRING: return "STRING";
case GENERIC: return "GENERIC";
default:
UNREACHABLE();
}
-CompareIC::State CompareIC::TargetState(State state,
+static CompareIC::State InputState(CompareIC::State old_state,
+ Handle<Object> value) {
+ switch (old_state) {
+ case CompareIC::UNINITIALIZED:
+ if (value->IsSmi()) return CompareIC::SMI;
+ if (value->IsHeapNumber()) return CompareIC::HEAP_NUMBER;
+ if (value->IsSymbol()) return CompareIC::SYMBOL;
+ if (value->IsString()) return CompareIC::STRING;
+ if (value->IsJSObject()) return CompareIC::OBJECT;
+ break;
+ case CompareIC::SMI:
+ if (value->IsSmi()) return CompareIC::SMI;
+ if (value->IsHeapNumber()) return CompareIC::HEAP_NUMBER;
+ break;
+ case CompareIC::HEAP_NUMBER:
+ if (value->IsNumber()) return CompareIC::HEAP_NUMBER;
+ break;
+ case CompareIC::SYMBOL:
+ if (value->IsSymbol()) return CompareIC::SYMBOL;
+ if (value->IsString()) return CompareIC::STRING;
+ break;
+ case CompareIC::STRING:
+ if (value->IsSymbol() || value->IsString()) return CompareIC::STRING;
+ break;
+ case CompareIC::OBJECT:
+ if (value->IsJSObject()) return CompareIC::OBJECT;
+ break;
+ case CompareIC::GENERIC:
+ break;
+ case CompareIC::KNOWN_OBJECTS:
+ UNREACHABLE();
+ break;
+ }
+ return CompareIC::GENERIC;
+}
+
+
+CompareIC::State CompareIC::TargetState(State old_state,
+ State old_left,
+ State old_right,
bool has_inlined_smi_code,
Handle<Object> x,
Handle<Object> y) {
- switch (state) {
+ switch (old_state) {
case UNINITIALIZED:
- if (x->IsSmi() && y->IsSmi()) return SMIS;
- if (x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
+ if (x->IsSmi() && y->IsSmi()) return SMI;
+ if (x->IsNumber() && y->IsNumber()) return HEAP_NUMBER;
if (Token::IsOrderedRelationalCompareOp(op_)) {
// Ordered comparisons treat undefined as NaN, so the
// HEAP_NUMBER stub will do the right thing.
if ((x->IsNumber() && y->IsUndefined()) ||
(y->IsNumber() && x->IsUndefined())) {
- return HEAP_NUMBERS;
+ return HEAP_NUMBER;
}
}
if (x->IsSymbol() && y->IsSymbol()) {
// We compare symbols as strings if we need to determine
// the order in a non-equality compare.
- return Token::IsEqualityOp(op_) ? SYMBOLS : STRINGS;
+ return Token::IsEqualityOp(op_) ? SYMBOL : STRING;
}
- if (x->IsString() && y->IsString()) return STRINGS;
+ if (x->IsString() && y->IsString()) return STRING;
if (!Token::IsEqualityOp(op_)) return GENERIC;
if (x->IsJSObject() && y->IsJSObject()) {
if (Handle<JSObject>::cast(x)->map() ==
Token::IsEqualityOp(op_)) {
return KNOWN_OBJECTS;
} else {
- return OBJECTS;
+ return OBJECT;
}
}
return GENERIC;
- case SMIS:
- return has_inlined_smi_code && x->IsNumber() && y->IsNumber()
- ? HEAP_NUMBERS
+ case SMI:
+ return x->IsNumber() && y->IsNumber()
+ ? HEAP_NUMBER
: GENERIC;
- case SYMBOLS:
+ case SYMBOL:
ASSERT(Token::IsEqualityOp(op_));
- return x->IsString() && y->IsString() ? STRINGS : GENERIC;
- case HEAP_NUMBERS:
- case STRINGS:
- case OBJECTS:
+ return x->IsString() && y->IsString() ? STRING : GENERIC;
+ case HEAP_NUMBER:
+ if (old_left == SMI && x->IsHeapNumber()) return HEAP_NUMBER;
+ if (old_right == SMI && y->IsHeapNumber()) return HEAP_NUMBER;
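+      // Fall through to GENERIC for any other combination.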
+ case STRING:
+ case OBJECT:
case KNOWN_OBJECTS:
case GENERIC:
return GENERIC;
}
UNREACHABLE();
- return GENERIC;
+ return GENERIC; // Make the compiler happy.
+}
+
+
+void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
+ HandleScope scope;
+ State previous_left, previous_right, previous_state;
+ ICCompareStub::DecodeMinorKey(target()->stub_info(), &previous_left,
+ &previous_right, &previous_state, NULL);
+ State new_left = InputState(previous_left, x);
+ State new_right = InputState(previous_right, y);
+ State state = TargetState(previous_state, previous_left, previous_right,
+ HasInlinedSmiCode(address()), x, y);
+ ICCompareStub stub(op_, new_left, new_right, state);
+ if (state == KNOWN_OBJECTS) {
+ stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
+ }
+ set_target(*stub.GetCode());
+
+#ifdef DEBUG
+ if (FLAG_trace_ic) {
+ PrintF("[CompareIC in ");
+ JavaScriptFrame::PrintTop(stdout, false, true);
+ PrintF(" ((%s+%s=%s)->(%s+%s=%s))#%s @ %p]\n",
+ GetStateName(previous_left),
+ GetStateName(previous_right),
+ GetStateName(previous_state),
+ GetStateName(new_left),
+ GetStateName(new_right),
+ GetStateName(state),
+ Token::Name(op_),
+ static_cast<void*>(*stub.GetCode()));
+ }
+#endif
+
+ // Activate inlined smi code.
+ if (previous_state == UNINITIALIZED) {
+ PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+ }
}
-// Used from ic_<arch>.cc.
+// Used from ICCompareStub::GenerateMiss in code-stubs-<arch>.cc.
RUNTIME_FUNCTION(Code*, CompareIC_Miss) {
NoHandleAllocation na;
ASSERT(args.length() == 3);
INT32,
HEAP_NUMBER,
ODDBALL,
- BOTH_STRING, // Only used for addition operation.
- STRING, // Only used for addition operation. At least one string operand.
+ STRING, // Only used for addition operation.
GENERIC
};
static const char* GetName(TypeInfo type_info);
static State ToState(TypeInfo type_info);
-
- static TypeInfo GetTypeInfo(Handle<Object> left, Handle<Object> right);
-
- static TypeInfo JoinTypes(TypeInfo x, TypeInfo y);
};
public:
enum State {
UNINITIALIZED,
- SMIS,
- HEAP_NUMBERS,
- SYMBOLS,
- STRINGS,
- OBJECTS,
+ SMI,
+ HEAP_NUMBER,
+ SYMBOL,
+ STRING,
+ OBJECT,
KNOWN_OBJECTS,
GENERIC
};
// Update the inline cache for the given operands.
void UpdateCaches(Handle<Object> x, Handle<Object> y);
+
// Factory method for getting an uninitialized compare stub.
static Handle<Code> GetUninitialized(Token::Value op);
// Helper function for computing the condition for a compare operation.
static Condition ComputeCondition(Token::Value op);
- // Helper function for determining the state of a compare IC.
- static State ComputeState(Code* target);
-
- // Helper function for determining the operation a compare IC is for.
- static Token::Value ComputeOperation(Code* target);
-
static const char* GetStateName(State state);
private:
- State TargetState(State state, bool has_inlined_smi_code,
- Handle<Object> x, Handle<Object> y);
+ static bool HasInlinedSmiCode(Address address);
+
+ State TargetState(State old_state,
+ State old_left,
+ State old_right,
+ bool has_inlined_smi_code,
+ Handle<Object> x,
+ Handle<Object> y);
bool strict() const { return op_ == Token::EQ_STRICT; }
Condition GetCondition() const { return ComputeCondition(op_); }
- State GetState() { return ComputeState(target()); }
static Code* GetRawUninitialized(Token::Value op);
}
-byte Code::binary_op_type() {
- ASSERT(is_binary_op_stub());
- return BinaryOpTypeField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-
-void Code::set_binary_op_type(byte value) {
- ASSERT(is_binary_op_stub());
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = BinaryOpTypeField::update(previous, value);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-
-byte Code::binary_op_result_type() {
- ASSERT(is_binary_op_stub());
- return BinaryOpResultTypeField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-
-void Code::set_binary_op_result_type(byte value) {
- ASSERT(is_binary_op_stub());
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = BinaryOpResultTypeField::update(previous, value);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-
-byte Code::compare_state() {
- ASSERT(is_compare_ic_stub());
- return CompareStateField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-
-void Code::set_compare_state(byte value) {
- ASSERT(is_compare_ic_stub());
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = CompareStateField::update(previous, value);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-
-byte Code::compare_operation() {
- ASSERT(is_compare_ic_stub());
- return CompareOperationField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-
-void Code::set_compare_operation(byte value) {
- ASSERT(is_compare_ic_stub());
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = CompareOperationField::update(previous, value);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-
byte Code::to_boolean_state() {
ASSERT(is_to_boolean_ic_stub());
return ToBooleanStateField::decode(
ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
ACCESSORS(Code, handler_table, FixedArray, kHandlerTableOffset)
ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
-ACCESSORS(Code, type_feedback_info, Object, kTypeFeedbackInfoOffset)
+
+
+// Type feedback slot: type_feedback_info for FUNCTIONs, stub_info for STUBs.
+void Code::InitializeTypeFeedbackInfoNoWriteBarrier(Object* value) {
+ WRITE_FIELD(this, kTypeFeedbackInfoOffset, value);
+}
+
+
+Object* Code::type_feedback_info() {
+ ASSERT(kind() == FUNCTION);
+ return Object::cast(READ_FIELD(this, kTypeFeedbackInfoOffset));
+}
+
+
+void Code::set_type_feedback_info(Object* value, WriteBarrierMode mode) {
+ ASSERT(kind() == FUNCTION);
+ WRITE_FIELD(this, kTypeFeedbackInfoOffset, value);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kTypeFeedbackInfoOffset,
+ value, mode);
+}
+
+
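+// For COMPARE_IC and BINARY_OP_IC stubs this slot holds the stub's minor
+// key, stored as a Smi.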
+int Code::stub_info() {
+ ASSERT(kind() == COMPARE_IC || kind() == BINARY_OP_IC);
+ Object* value = READ_FIELD(this, kTypeFeedbackInfoOffset);
+ return Smi::cast(value)->value();
+}
+
+
+void Code::set_stub_info(int value) {
+ ASSERT(kind() == COMPARE_IC || kind() == BINARY_OP_IC);
+ WRITE_FIELD(this, kTypeFeedbackInfoOffset, Smi::FromInt(value));
+}
+
+
ACCESSORS(Code, gc_metadata, Object, kGCMetadataOffset)
INT_ACCESSORS(Code, ic_age, kICAgeOffset)
+
byte* Code::instruction_start() {
return FIELD_ADDR(this, kHeaderSize);
}
void Code::ClearTypeFeedbackCells(Heap* heap) {
+ if (kind() != FUNCTION) return;
Object* raw_info = type_feedback_info();
if (raw_info->IsTypeFeedbackInfo()) {
TypeFeedbackCells* type_feedback_cells =
bool Code::allowed_in_shared_map_code_cache() {
return is_keyed_load_stub() || is_keyed_store_stub() ||
- (is_compare_ic_stub() && compare_state() == CompareIC::KNOWN_OBJECTS);
+ (is_compare_ic_stub() &&
+ ICCompareStub::CompareState(stub_info()) == CompareIC::KNOWN_OBJECTS);
}
PrintF(out, "argc = %d\n", arguments_count());
}
if (is_compare_ic_stub()) {
- CompareIC::State state = CompareIC::ComputeState(this);
- PrintF(out, "compare_state = %s\n", CompareIC::GetStateName(state));
- }
- if (is_compare_ic_stub() && major_key() == CodeStub::CompareIC) {
- Token::Value op = CompareIC::ComputeOperation(this);
+ ASSERT(major_key() == CodeStub::CompareIC);
+ CompareIC::State left_state, right_state, handler_state;
+ Token::Value op;
+ ICCompareStub::DecodeMinorKey(stub_info(), &left_state, &right_state,
+ &handler_state, &op);
+ PrintF(out, "compare_state = %s*%s -> %s\n",
+ CompareIC::GetStateName(left_state),
+ CompareIC::GetStateName(right_state),
+ CompareIC::GetStateName(handler_state));
PrintF(out, "compare_operation = %s\n", Token::Name(op));
}
}
PrintF(out, "\n");
}
PrintF(out, "\n");
- // Just print if type feedback info is ever used for optimized code.
- ASSERT(type_feedback_info()->IsUndefined());
} else if (kind() == FUNCTION) {
unsigned offset = stack_check_table_offset();
    // If there is no stack check table, the "table start" will be at or after
DECL_ACCESSORS(deoptimization_data, FixedArray)
// [type_feedback_info]: Struct containing type feedback information.
- // Will contain either a TypeFeedbackInfo object, or undefined.
+ // STUBs can use this slot to store arbitrary information as a Smi.
+ // Will contain either a TypeFeedbackInfo object, or undefined, or a Smi.
DECL_ACCESSORS(type_feedback_info, Object)
+ inline void InitializeTypeFeedbackInfoNoWriteBarrier(Object* value);
+ inline int stub_info();
+ inline void set_stub_info(int info);
// [gc_metadata]: Field used to hold GC related metadata. The contents of this
// field does not have to be traced during garbage collection since
inline byte unary_op_type();
inline void set_unary_op_type(byte value);
- // [type-recording binary op type]: For kind BINARY_OP_IC.
- inline byte binary_op_type();
- inline void set_binary_op_type(byte value);
- inline byte binary_op_result_type();
- inline void set_binary_op_result_type(byte value);
-
- // [compare state]: For kind COMPARE_IC, tells what state the stub is in.
- inline byte compare_state();
- inline void set_compare_state(byte value);
-
- // [compare_operation]: For kind COMPARE_IC tells what compare operation the
- // stub was generated for.
- inline byte compare_operation();
- inline void set_compare_operation(byte value);
-
// [to_boolean_foo]: For kind TO_BOOLEAN_IC tells what state the stub is in.
inline byte to_boolean_state();
inline void set_to_boolean_state(byte value);
static const int kUnaryOpTypeFirstBit =
kStackSlotsFirstBit + kStackSlotsBitCount;
static const int kUnaryOpTypeBitCount = 3;
- static const int kBinaryOpTypeFirstBit =
- kStackSlotsFirstBit + kStackSlotsBitCount;
- static const int kBinaryOpTypeBitCount = 3;
- static const int kBinaryOpResultTypeFirstBit =
- kBinaryOpTypeFirstBit + kBinaryOpTypeBitCount;
- static const int kBinaryOpResultTypeBitCount = 3;
- static const int kCompareStateFirstBit =
- kStackSlotsFirstBit + kStackSlotsBitCount;
- static const int kCompareStateBitCount = 3;
- static const int kCompareOperationFirstBit =
- kCompareStateFirstBit + kCompareStateBitCount;
- static const int kCompareOperationBitCount = 4;
static const int kToBooleanStateFirstBit =
kStackSlotsFirstBit + kStackSlotsBitCount;
static const int kToBooleanStateBitCount = 8;
STATIC_ASSERT(kStackSlotsFirstBit + kStackSlotsBitCount <= 32);
STATIC_ASSERT(kUnaryOpTypeFirstBit + kUnaryOpTypeBitCount <= 32);
- STATIC_ASSERT(kBinaryOpTypeFirstBit + kBinaryOpTypeBitCount <= 32);
- STATIC_ASSERT(kBinaryOpResultTypeFirstBit +
- kBinaryOpResultTypeBitCount <= 32);
- STATIC_ASSERT(kCompareStateFirstBit + kCompareStateBitCount <= 32);
- STATIC_ASSERT(kCompareOperationFirstBit + kCompareOperationBitCount <= 32);
STATIC_ASSERT(kToBooleanStateFirstBit + kToBooleanStateBitCount <= 32);
STATIC_ASSERT(kHasFunctionCacheFirstBit + kHasFunctionCacheBitCount <= 32);
kStackSlotsFirstBit, kStackSlotsBitCount> {}; // NOLINT
class UnaryOpTypeField: public BitField<int,
kUnaryOpTypeFirstBit, kUnaryOpTypeBitCount> {}; // NOLINT
- class BinaryOpTypeField: public BitField<int,
- kBinaryOpTypeFirstBit, kBinaryOpTypeBitCount> {}; // NOLINT
- class BinaryOpResultTypeField: public BitField<int,
- kBinaryOpResultTypeFirstBit, kBinaryOpResultTypeBitCount> {}; // NOLINT
- class CompareStateField: public BitField<int,
- kCompareStateFirstBit, kCompareStateBitCount> {}; // NOLINT
- class CompareOperationField: public BitField<int,
- kCompareOperationFirstBit, kCompareOperationBitCount> {}; // NOLINT
class ToBooleanStateField: public BitField<int,
kToBooleanStateFirstBit, kToBooleanStateBitCount> {}; // NOLINT
class HasFunctionCacheField: public BitField<bool,
SetInternalReference(code, entry,
"deoptimization_data", code->deoptimization_data(),
Code::kDeoptimizationDataOffset);
- SetInternalReference(code, entry,
- "type_feedback_info", code->type_feedback_info(),
- Code::kTypeFeedbackInfoOffset);
+ if (code->kind() == Code::FUNCTION) {
+ SetInternalReference(code, entry,
+ "type_feedback_info", code->type_feedback_info(),
+ Code::kTypeFeedbackInfoOffset);
+ }
SetInternalReference(code, entry,
"gc_metadata", code->gc_metadata(),
Code::kGCMetadataOffset);
}
-TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr) {
- Handle<Object> object = GetInfo(expr->CompareOperationFeedbackId());
- TypeInfo unknown = TypeInfo::Unknown();
- if (!object->IsCode()) return unknown;
- Handle<Code> code = Handle<Code>::cast(object);
- if (!code->is_compare_ic_stub()) return unknown;
-
- CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
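+// Map a CompareIC state to the TypeInfo the oracle reports for it.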
+static TypeInfo TypeFromCompareType(CompareIC::State state) {
switch (state) {
case CompareIC::UNINITIALIZED:
// Uninitialized means never executed.
return TypeInfo::Uninitialized();
- case CompareIC::SMIS:
+ case CompareIC::SMI:
return TypeInfo::Smi();
- case CompareIC::HEAP_NUMBERS:
+ case CompareIC::HEAP_NUMBER:
return TypeInfo::Number();
- case CompareIC::SYMBOLS:
- case CompareIC::STRINGS:
+ case CompareIC::SYMBOL:
+ return TypeInfo::Symbol();
+ case CompareIC::STRING:
return TypeInfo::String();
- case CompareIC::OBJECTS:
+ case CompareIC::OBJECT:
case CompareIC::KNOWN_OBJECTS:
// TODO(kasperl): We really need a type for JS objects here.
return TypeInfo::NonPrimitive();
case CompareIC::GENERIC:
default:
- return unknown;
+ return TypeInfo::Unknown();
}
}
-bool TypeFeedbackOracle::IsSymbolCompare(CompareOperation* expr) {
+void TypeFeedbackOracle::CompareType(CompareOperation* expr,
+ TypeInfo* left_type,
+ TypeInfo* right_type,
+ TypeInfo* overall_type) {
Handle<Object> object = GetInfo(expr->CompareOperationFeedbackId());
- if (!object->IsCode()) return false;
+ TypeInfo unknown = TypeInfo::Unknown();
+ if (!object->IsCode()) {
+ *left_type = *right_type = *overall_type = unknown;
+ return;
+ }
Handle<Code> code = Handle<Code>::cast(object);
- if (!code->is_compare_ic_stub()) return false;
- CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
- return state == CompareIC::SYMBOLS;
+ if (!code->is_compare_ic_stub()) {
+ *left_type = *right_type = *overall_type = unknown;
+ return;
+ }
+
+ int stub_minor_key = code->stub_info();
+ CompareIC::State left_state, right_state, handler_state;
+ ICCompareStub::DecodeMinorKey(stub_minor_key, &left_state, &right_state,
+ &handler_state, NULL);
+ *left_type = TypeFromCompareType(left_state);
+ *right_type = TypeFromCompareType(right_state);
+ *overall_type = TypeFromCompareType(handler_state);
}
if (!object->IsCode()) return Handle<Map>::null();
Handle<Code> code = Handle<Code>::cast(object);
if (!code->is_compare_ic_stub()) return Handle<Map>::null();
- CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
+ CompareIC::State state = ICCompareStub::CompareState(code->stub_info());
if (state != CompareIC::KNOWN_OBJECTS) {
return Handle<Map>::null();
}
}
-TypeInfo TypeFeedbackOracle::BinaryType(BinaryOperation* expr) {
+static TypeInfo TypeFromBinaryOpType(BinaryOpIC::TypeInfo binary_type) {
+ switch (binary_type) {
+ // Uninitialized means never executed.
+ case BinaryOpIC::UNINITIALIZED: return TypeInfo::Uninitialized();
+ case BinaryOpIC::SMI: return TypeInfo::Smi();
+ case BinaryOpIC::INT32: return TypeInfo::Integer32();
+ case BinaryOpIC::HEAP_NUMBER: return TypeInfo::Double();
+ case BinaryOpIC::ODDBALL: return TypeInfo::Unknown();
+ case BinaryOpIC::STRING: return TypeInfo::String();
+ case BinaryOpIC::GENERIC: return TypeInfo::Unknown();
+ }
+ UNREACHABLE();
+ return TypeInfo::Unknown();
+}
+
+
+void TypeFeedbackOracle::BinaryType(BinaryOperation* expr,
+ TypeInfo* left,
+ TypeInfo* right,
+ TypeInfo* result) {
Handle<Object> object = GetInfo(expr->BinaryOperationFeedbackId());
TypeInfo unknown = TypeInfo::Unknown();
- if (!object->IsCode()) return unknown;
+ if (!object->IsCode()) {
+ *left = *right = *result = unknown;
+ return;
+ }
Handle<Code> code = Handle<Code>::cast(object);
if (code->is_binary_op_stub()) {
- BinaryOpIC::TypeInfo type = static_cast<BinaryOpIC::TypeInfo>(
- code->binary_op_type());
- BinaryOpIC::TypeInfo result_type = static_cast<BinaryOpIC::TypeInfo>(
- code->binary_op_result_type());
-
- switch (type) {
- case BinaryOpIC::UNINITIALIZED:
- // Uninitialized means never executed.
- return TypeInfo::Uninitialized();
- case BinaryOpIC::SMI:
- switch (result_type) {
- case BinaryOpIC::UNINITIALIZED:
- if (expr->op() == Token::DIV) {
- return TypeInfo::Double();
- }
- return TypeInfo::Smi();
- case BinaryOpIC::SMI:
- return TypeInfo::Smi();
- case BinaryOpIC::INT32:
- return TypeInfo::Integer32();
- case BinaryOpIC::HEAP_NUMBER:
- return TypeInfo::Double();
- default:
- return unknown;
- }
- case BinaryOpIC::INT32:
- if (expr->op() == Token::DIV ||
- result_type == BinaryOpIC::HEAP_NUMBER) {
- return TypeInfo::Double();
- }
- return TypeInfo::Integer32();
- case BinaryOpIC::HEAP_NUMBER:
- return TypeInfo::Double();
- case BinaryOpIC::BOTH_STRING:
- return TypeInfo::String();
- case BinaryOpIC::STRING:
- case BinaryOpIC::GENERIC:
- return unknown;
- default:
- return unknown;
- }
+ BinaryOpIC::TypeInfo left_type, right_type, result_type;
+ BinaryOpStub::decode_types_from_minor_key(code->stub_info(), &left_type,
+ &right_type, &result_type);
+ *left = TypeFromBinaryOpType(left_type);
+ *right = TypeFromBinaryOpType(right_type);
+ *result = TypeFromBinaryOpType(result_type);
+ return;
}
- return unknown;
+ // Not a binary op stub.
+ *left = *right = *result = unknown;
}
Handle<Code> code = Handle<Code>::cast(object);
if (!code->is_compare_ic_stub()) return unknown;
- CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
- switch (state) {
- case CompareIC::UNINITIALIZED:
- // Uninitialized means never executed.
- // TODO(fschneider): Introduce a separate value for never-executed ICs.
- return unknown;
- case CompareIC::SMIS:
- return TypeInfo::Smi();
- case CompareIC::STRINGS:
- return TypeInfo::String();
- case CompareIC::SYMBOLS:
- return TypeInfo::Symbol();
- case CompareIC::HEAP_NUMBERS:
- return TypeInfo::Number();
- case CompareIC::OBJECTS:
- case CompareIC::KNOWN_OBJECTS:
- // TODO(kasperl): We really need a type for JS objects here.
- return TypeInfo::NonPrimitive();
- case CompareIC::GENERIC:
- default:
- return unknown;
- }
+ CompareIC::State state = ICCompareStub::CompareState(code->stub_info());
+ return TypeFromCompareType(state);
}
Handle<Code> code = Handle<Code>::cast(object);
if (!code->is_binary_op_stub()) return unknown;
- BinaryOpIC::TypeInfo type = static_cast<BinaryOpIC::TypeInfo>(
- code->binary_op_type());
- switch (type) {
+ BinaryOpIC::TypeInfo left_type, right_type, unused_result_type;
+ BinaryOpStub::decode_types_from_minor_key(code->stub_info(), &left_type,
+ &right_type, &unused_result_type);
+ // CountOperations should always have +1 or -1 as their right input.
+ ASSERT(right_type == BinaryOpIC::SMI ||
+ right_type == BinaryOpIC::UNINITIALIZED);
+
+ switch (left_type) {
case BinaryOpIC::UNINITIALIZED:
case BinaryOpIC::SMI:
return TypeInfo::Smi();
return TypeInfo::Integer32();
case BinaryOpIC::HEAP_NUMBER:
return TypeInfo::Double();
- case BinaryOpIC::BOTH_STRING:
case BinaryOpIC::STRING:
case BinaryOpIC::GENERIC:
return unknown;
kNonPrimitive = 0x40, // 1000000
kUninitialized = 0x7f // 1111111
};
+
explicit inline TypeInfo(Type t) : type_(t) { }
Type type_;
// Get type information for arithmetic operations and compares.
TypeInfo UnaryType(UnaryOperation* expr);
- TypeInfo BinaryType(BinaryOperation* expr);
- TypeInfo CompareType(CompareOperation* expr);
- bool IsSymbolCompare(CompareOperation* expr);
+ void BinaryType(BinaryOperation* expr,
+ TypeInfo* left,
+ TypeInfo* right,
+ TypeInfo* result);
+ void CompareType(CompareOperation* expr,
+ TypeInfo* left_type,
+ TypeInfo* right_type,
+ TypeInfo* overall_type);
Handle<Map> GetCompareMap(CompareOperation* expr);
TypeInfo SwitchType(CaseClause* clause);
TypeInfo IncrementType(CountOperation* expr);
class FloatingPointHelper : public AllStatic {
public:
+ enum ConvertUndefined {
+ CONVERT_UNDEFINED_TO_ZERO,
+ BAILOUT_ON_UNDEFINED
+ };
// Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
// If the operands are not both numbers, jump to not_numbers.
// Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
Register scratch2,
Register scratch3,
Label* on_success,
- Label* on_not_smis);
+ Label* on_not_smis,
+ ConvertUndefined convert_undefined);
};
}
+void BinaryOpStub::Initialize() {}
+
+
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ pop(rcx); // Save return address.
__ push(rdx);
__ push(rax);
// Left and right arguments are now on top.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
__ Push(Smi::FromInt(MinorKey()));
- __ Push(Smi::FromInt(op_));
- __ Push(Smi::FromInt(operands_type_));
__ push(rcx); // Push return address.
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
masm->isolate()),
- 5,
+ 3,
1);
}
-void BinaryOpStub::Generate(MacroAssembler* masm) {
- // Explicitly allow generation of nested stubs. It is safe here because
- // generation code does not use any raw pointers.
- AllowStubCallsScope allow_stub_calls(masm, true);
-
- switch (operands_type_) {
- case BinaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case BinaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case BinaryOpIC::INT32:
- UNREACHABLE();
- // The int32 case is identical to the Smi case. We avoid creating this
- // ic state on x64.
- break;
- case BinaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
- break;
- case BinaryOpIC::ODDBALL:
- GenerateOddballStub(masm);
- break;
- case BinaryOpIC::BOTH_STRING:
- GenerateBothStringStub(masm);
- break;
- case BinaryOpIC::STRING:
- GenerateStringStub(masm);
- break;
- case BinaryOpIC::GENERIC:
- GenerateGeneric(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
- stream->Add("BinaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(operands_type_));
-}
-
-
-void BinaryOpStub::GenerateSmiCode(
+static void BinaryOpStub_GenerateSmiCode(
MacroAssembler* masm,
Label* slow,
- SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
+ BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
+ Token::Value op) {
// Arguments to BinaryOpStub are in rdx and rax.
const Register left = rdx;
// We only generate heapnumber answers for overflowing calculations
// for the four basic arithmetic operations and logical right shift by 0.
bool generate_inline_heapnumber_results =
- (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) &&
- (op_ == Token::ADD || op_ == Token::SUB ||
- op_ == Token::MUL || op_ == Token::DIV || op_ == Token::SHR);
+ (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) &&
+ (op == Token::ADD || op == Token::SUB ||
+ op == Token::MUL || op == Token::DIV || op == Token::SHR);
// Smi check of both operands. If op is BIT_OR, the check is delayed
// until after the OR operation.
Label use_fp_on_smis;
Label fail;
- if (op_ != Token::BIT_OR) {
+ if (op != Token::BIT_OR) {
Comment smi_check_comment(masm, "-- Smi check arguments");
     __ JumpIfNotBothSmi(left, right, &not_smis);
}
__ bind(&smi_values);
// Perform the operation.
Comment perform_smi(masm, "-- Perform smi operation");
- switch (op_) {
+ switch (op) {
case Token::ADD:
ASSERT(right.is(rax));
__ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
// operations on known smis (e.g., if the result of the operation
// overflowed the smi range).
__ bind(&use_fp_on_smis);
- if (op_ == Token::DIV || op_ == Token::MOD) {
+ if (op == Token::DIV || op == Token::MOD) {
// Restore left and right to rdx and rax.
__ movq(rdx, rcx);
__ movq(rax, rbx);
if (generate_inline_heapnumber_results) {
__ AllocateHeapNumber(rcx, rbx, slow);
Comment perform_float(masm, "-- Perform float operation on smis");
- if (op_ == Token::SHR) {
+ if (op == Token::SHR) {
__ SmiToInteger32(left, left);
__ cvtqsi2sd(xmm0, left);
} else {
FloatingPointHelper::LoadSSE2SmiOperands(masm);
- switch (op_) {
+ switch (op) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
case Token::SUB: __ subsd(xmm0, xmm1); break;
case Token::MUL: __ mulsd(xmm0, xmm1); break;
// values that could be smi.
   __ bind(&not_smis);
Comment done_comment(masm, "-- Enter non-smi code");
+ FloatingPointHelper::ConvertUndefined convert_undefined =
+ FloatingPointHelper::BAILOUT_ON_UNDEFINED;
+ // This list must be in sync with BinaryOpPatch() behavior in ic.cc.
+ if (op == Token::BIT_AND ||
+ op == Token::BIT_OR ||
+ op == Token::BIT_XOR ||
+ op == Token::SAR ||
+ op == Token::SHL ||
+ op == Token::SHR) {
+ convert_undefined = FloatingPointHelper::CONVERT_UNDEFINED_TO_ZERO;
+ }
FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx,
- &smi_values, &fail);
+ &smi_values, &fail, convert_undefined);
__ jmp(&smi_values);
__ bind(&fail);
}
-void BinaryOpStub::GenerateFloatingPointCode(MacroAssembler* masm,
- Label* allocation_failure,
- Label* non_numeric_failure) {
- switch (op_) {
+static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Label* alloc_failure,
+ OverwriteMode mode);
+
+
+static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm,
+ Label* allocation_failure,
+ Label* non_numeric_failure,
+ Token::Value op,
+ OverwriteMode mode) {
+ switch (op) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV: {
FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);
- switch (op_) {
+ switch (op) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
case Token::SUB: __ subsd(xmm0, xmm1); break;
case Token::MUL: __ mulsd(xmm0, xmm1); break;
case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE();
}
- GenerateHeapResultAllocation(masm, allocation_failure);
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, allocation_failure, mode);
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
__ ret(0);
break;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
heap_number_map);
- switch (op_) {
+ switch (op) {
case Token::BIT_OR: __ orl(rax, rcx); break;
case Token::BIT_AND: __ andl(rax, rcx); break;
case Token::BIT_XOR: __ xorl(rax, rcx); break;
// Logical shift right can produce an unsigned int32 that is not
// an int32, and so is not in the smi range. Allocate a heap number
// in that case.
- if (op_ == Token::SHR) {
+ if (op == Token::SHR) {
__ bind(&non_smi_shr_result);
Label allocation_failed;
__ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
// No fall-through from this generated code.
if (FLAG_debug_code) {
__ Abort("Unexpected fall-through in "
- "BinaryStub::GenerateFloatingPointCode.");
+             "BinaryOpStub_GenerateFloatingPointCode.");
}
}
-void BinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
+void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
ASSERT(op_ == Token::ADD);
Label left_not_string, call_runtime;
}
-void BinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) {
- GenerateRegisterArgsPush(masm);
- switch (op_) {
- case Token::ADD:
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
Label call_runtime;
if (result_type_ == BinaryOpIC::UNINITIALIZED ||
result_type_ == BinaryOpIC::SMI) {
// Only allow smi results.
- GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS, op_);
} else {
// Allow heap number result and don't make a transition if a heap number
// cannot be allocated.
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
}
// Code falls through if the result is not returned as either a smi or heap
if (call_runtime.is_linked()) {
__ bind(&call_runtime);
- GenerateCallRuntimeCode(masm);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
}
}
-void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(operands_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- GenerateStringAddCode(masm);
- // Try to add arguments as strings, otherwise, transition to the generic
- // BinaryOpIC type.
- GenerateTypeTransition(masm);
+void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+ // The int32 case is identical to the Smi case. We avoid creating this
+ // ic state on x64.
+ UNREACHABLE();
}
void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
Label call_runtime;
- ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+ ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
ASSERT(op_ == Token::ADD);
// If both arguments are strings, call the string add stub.
// Otherwise, do a transition.
if (op_ == Token::ADD) {
// Handle string addition here, because it is the only operation
// that does not do a ToNumber conversion on the operands.
- GenerateStringAddCode(masm);
+ GenerateAddStrings(masm);
}
// Convert oddball arguments to numbers.
}
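+// Jumps to |fail| unless |input| is a Smi or a HeapNumber holding an int32.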
+static void BinaryOpStub_CheckSmiInput(MacroAssembler* masm,
+ Register input,
+ Label* fail) {
+ Label ok;
+ __ JumpIfSmi(input, &ok, Label::kNear);
+ Register heap_number_map = r8;
+ Register scratch1 = r9;
+ Register scratch2 = r10;
+ // HeapNumbers containing 32bit integer values are also allowed.
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmpq(FieldOperand(input, HeapObject::kMapOffset), heap_number_map);
+ __ j(not_equal, fail);
+ __ movsd(xmm0, FieldOperand(input, HeapNumber::kValueOffset));
+ // Convert, convert back, and compare the two doubles' bits.
+ __ cvttsd2siq(scratch2, xmm0);
+ __ cvtlsi2sd(xmm1, scratch2);
+ __ movq(scratch1, xmm0);
+ __ movq(scratch2, xmm1);
+ __ cmpq(scratch1, scratch2);
+ __ j(not_equal, fail);
+ __ bind(&ok);
+}
+
+
void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
Label gc_required, not_number;
-  GenerateFloatingPointCode(masm, &gc_required, &not_number);
+
+ // It could be that only SMIs have been seen at either the left
+ // or the right operand. For precise type feedback, patch the IC
+ // again if this changes.
+ if (left_type_ == BinaryOpIC::SMI) {
+    BinaryOpStub_CheckSmiInput(masm, rdx, &not_number);
+  }
+  if (right_type_ == BinaryOpIC::SMI) {
+    BinaryOpStub_CheckSmiInput(masm, rax, &not_number);
+ }
+
+ BinaryOpStub_GenerateFloatingPointCode(
+      masm, &gc_required, &not_number, op_, mode_);
+  __ bind(&not_number);
GenerateTypeTransition(masm);
__ bind(&gc_required);
- GenerateCallRuntimeCode(masm);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
}
void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
Label call_runtime, call_string_add_or_runtime;
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
- GenerateFloatingPointCode(masm, &call_runtime, &call_string_add_or_runtime);
+ BinaryOpStub_GenerateFloatingPointCode(
+ masm, &call_runtime, &call_string_add_or_runtime, op_, mode_);
__ bind(&call_string_add_or_runtime);
if (op_ == Token::ADD) {
- GenerateStringAddCode(masm);
+ GenerateAddStrings(masm);
}
__ bind(&call_runtime);
- GenerateCallRuntimeCode(masm);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
}
-void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure) {
+static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Label* alloc_failure,
+ OverwriteMode mode) {
Label skip_allocation;
- OverwriteMode mode = mode_;
switch (mode) {
case OVERWRITE_LEFT: {
// If the argument in rdx is already an object, we skip the
Register scratch2,
Register scratch3,
Label* on_success,
- Label* on_not_smis) {
+ Label* on_not_smis,
+ ConvertUndefined convert_undefined) {
Register heap_number_map = scratch3;
Register smi_result = scratch1;
- Label done;
+ Label done, maybe_undefined_first, maybe_undefined_second, first_done;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
Label first_smi;
__ JumpIfSmi(first, &first_smi, Label::kNear);
__ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, on_not_smis);
+ __ j(not_equal,
+ (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
+ ? &maybe_undefined_first
+ : on_not_smis);
// Convert HeapNumber to smi if possible.
__ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
__ movq(scratch2, xmm0);
__ j(not_equal, on_not_smis);
__ Integer32ToSmi(first, smi_result);
+ __ bind(&first_done);
__ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
__ bind(&first_smi);
__ AssertNotSmi(second);
__ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, on_not_smis);
+ __ j(not_equal,
+ (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
+ ? &maybe_undefined_second
+ : on_not_smis);
// Convert second to smi, if possible.
__ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
__ movq(scratch2, xmm0);
if (on_success != NULL) {
__ jmp(on_success);
} else {
- __ bind(&done);
+ __ jmp(&done);
+ }
+
+ __ bind(&maybe_undefined_first);
+ __ CompareRoot(first, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, on_not_smis);
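+  // Convert undefined to zero (the Smi 0).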
+ __ xor_(first, first);
+ __ jmp(&first_done);
+
+ __ bind(&maybe_undefined_second);
+ __ CompareRoot(second, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, on_not_smis);
+ __ xor_(second, second);
+ if (on_success != NULL) {
+ __ jmp(on_success);
}
+ // Else: fall through.
+
+ __ bind(&done);
}
}
-void CompareStub::Generate(MacroAssembler* masm) {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+static void CheckInputType(MacroAssembler* masm,
+ Register input,
+ CompareIC::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareIC::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareIC::HEAP_NUMBER) {
+ __ JumpIfSmi(input, &ok);
+ __ CompareMap(input, masm->isolate()->factory()->heap_number_map(), NULL);
+ __ j(not_equal, fail);
+ }
+ // We could be strict about symbol/string here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ bind(&ok);
+}
+
+
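+// Jump to |label| unless |object| is a symbol.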
+static void BranchIfNonSymbol(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch) {
+ __ JumpIfSmi(object, label);
+ __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzxbq(scratch,
+ FieldOperand(scratch, Map::kInstanceTypeOffset));
+ // Ensure that no non-strings have the symbol bit set.
+ STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ testb(scratch, Immediate(kIsSymbolMask));
+ __ j(zero, label);
+}
+
+void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
Label check_unequal_objects, done;
+ Condition cc = GetCondition();
Factory* factory = masm->isolate()->factory();
- // Compare two smis if required.
- if (include_smi_compare_) {
- Label non_smi, smi_done;
- __ JumpIfNotBothSmi(rax, rdx, &non_smi);
- __ subq(rdx, rax);
- __ j(no_overflow, &smi_done);
- __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
- __ bind(&smi_done);
- __ movq(rax, rdx);
- __ ret(0);
- __ bind(&non_smi);
- } else if (FLAG_debug_code) {
- Label ok;
- __ JumpIfNotSmi(rdx, &ok);
- __ JumpIfNotSmi(rax, &ok);
- __ Abort("CompareStub: smi operands");
- __ bind(&ok);
- }
+ Label miss;
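+  // Re-check operands recorded as SMI or HEAP_NUMBER; on a mismatch, miss.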
+ CheckInputType(masm, rdx, left_, &miss);
+ CheckInputType(masm, rax, right_, &miss);
+
+ // Compare two smis.
+ Label non_smi, smi_done;
+ __ JumpIfNotBothSmi(rax, rdx, &non_smi);
+ __ subq(rdx, rax);
+ __ j(no_overflow, &smi_done);
+ __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
+ __ bind(&smi_done);
+ __ movq(rax, rdx);
+ __ ret(0);
+ __ bind(&non_smi);
// The compare stub returns a positive, negative, or zero 64-bit integer
  // value in rax, corresponding to the result of comparing the two inputs.
__ cmpq(rax, rdx);
   __ j(not_equal, &not_identical, Label::kNear);
- if (cc_ != equal) {
+ if (cc != equal) {
// Check for undefined. undefined OP undefined is false even though
// undefined == undefined.
Label check_for_nan;
__ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &check_for_nan, Label::kNear);
- __ Set(rax, NegativeComparisonResult(cc_));
+ __ Set(rax, NegativeComparisonResult(cc));
__ ret(0);
__ bind(&check_for_nan);
}
// Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
// so we do the second best thing - test it ourselves.
- // Note: if cc_ != equal, never_nan_nan_ is not used.
- // We cannot set rax to EQUAL until just before return because
- // rax must be unchanged on jump to not_identical.
- if (never_nan_nan_ && (cc_ == equal)) {
- __ Set(rax, EQUAL);
- __ ret(0);
- } else {
- Label heap_number;
- // If it's not a heap number, then return equal for (in)equality operator.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- factory->heap_number_map());
- __ j(equal, &heap_number, Label::kNear);
- if (cc_ != equal) {
- // Call runtime on identical objects. Otherwise return equal.
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
-      __ j(above_equal, &not_identical, Label::kNear);
- }
- __ Set(rax, EQUAL);
- __ ret(0);
+ Label heap_number;
+ // If it's not a heap number, then return equal for (in)equality operator.
+ __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
+ factory->heap_number_map());
+ __ j(equal, &heap_number, Label::kNear);
+ if (cc != equal) {
+ // Call runtime on identical objects. Otherwise return equal.
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+    __ j(above_equal, &not_identical, Label::kNear);
+ }
+ __ Set(rax, EQUAL);
+ __ ret(0);
- __ bind(&heap_number);
- // It is a heap number, so return equal if it's not NaN.
- // For NaN, return 1 for every condition except greater and
- // greater-equal. Return -1 for them, so the comparison yields
- // false for all conditions except not-equal.
- __ Set(rax, EQUAL);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm0);
- __ setcc(parity_even, rax);
- // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
- if (cc_ == greater_equal || cc_ == greater) {
- __ neg(rax);
- }
- __ ret(0);
+ __ bind(&heap_number);
+ // It is a heap number, so return equal if it's not NaN.
+ // For NaN, return 1 for every condition except greater and
+ // greater-equal. Return -1 for them, so the comparison yields
+ // false for all conditions except not-equal.
+ __ Set(rax, EQUAL);
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ ucomisd(xmm0, xmm0);
+ __ setcc(parity_even, rax);
+ // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
+ if (cc == greater_equal || cc == greater) {
+ __ neg(rax);
}
+ __ ret(0);
   __ bind(&not_identical);
}
- if (cc_ == equal) { // Both strict and non-strict.
+ if (cc == equal) { // Both strict and non-strict.
Label slow; // Fallthrough label.
// If we're doing a strict equality comparison, we don't have to do
// type conversion, so we generate code to do fast comparison for objects
// and oddballs. Non-smi numbers and strings still go through the usual
// slow-case code.
- if (strict_) {
+ if (strict()) {
// If either is a Smi (we know that not both are), then they can only
// be equal if the other is a HeapNumber. If so, use the slow case.
{
}
// Generate the number comparison code.
- if (include_number_compare_) {
- Label non_number_comparison;
- Label unordered;
- FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
- __ xorl(rax, rax);
- __ xorl(rcx, rcx);
- __ ucomisd(xmm0, xmm1);
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ setcc(above, rax);
- __ setcc(below, rcx);
- __ subq(rax, rcx);
- __ ret(0);
+ Label non_number_comparison;
+ Label unordered;
+ FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
+ __ xorl(rax, rax);
+ __ xorl(rcx, rcx);
+ __ ucomisd(xmm0, xmm1);
- // If one of the numbers was NaN, then the result is always false.
- // The cc is never not-equal.
- __ bind(&unordered);
- ASSERT(cc_ != not_equal);
- if (cc_ == less || cc_ == less_equal) {
- __ Set(rax, 1);
- } else {
- __ Set(rax, -1);
- }
- __ ret(0);
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered, Label::kNear);
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ setcc(above, rax);
+ __ setcc(below, rcx);
+ __ subq(rax, rcx);
+ __ ret(0);
- // The number comparison code did not provide a valid result.
- __ bind(&non_number_comparison);
+ // If one of the numbers was NaN, then the result is always false.
+ // The cc is never not-equal.
+ __ bind(&unordered);
+ ASSERT(cc != not_equal);
+ if (cc == less || cc == less_equal) {
+ __ Set(rax, 1);
+ } else {
+ __ Set(rax, -1);
}
+ __ ret(0);
+
+ // The number comparison code did not provide a valid result.
+ __ bind(&non_number_comparison);
// Fast negative check for symbol-to-symbol equality.
Label check_for_strings;
- if (cc_ == equal) {
+ if (cc == equal) {
BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister);
rdx, rax, rcx, rbx, &check_unequal_objects);
// Inline comparison of ASCII strings.
- if (cc_ == equal) {
+ if (cc == equal) {
StringCompareStub::GenerateFlatAsciiStringEquals(masm,
rdx,
rax,
#endif
__ bind(&check_unequal_objects);
- if (cc_ == equal && !strict_) {
+ if (cc == equal && !strict()) {
// Not strict equality. Objects are unequal if
// they are both JSObjects and not undetectable,
// and their pointers are different.
// Figure out which native to call and setup the arguments.
Builtins::JavaScript builtin;
- if (cc_ == equal) {
- builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc == equal) {
+ builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
builtin = Builtins::COMPARE;
- __ Push(Smi::FromInt(NegativeComparisonResult(cc_)));
+ __ Push(Smi::FromInt(NegativeComparisonResult(cc)));
}
// Restore return address on the stack.
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ InvokeBuiltin(builtin, JUMP_FUNCTION);
-}
-
-void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch) {
- __ JumpIfSmi(object, label);
- __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzxbq(scratch,
- FieldOperand(scratch, Map::kInstanceTypeOffset));
- // Ensure that no non-strings have the symbol bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
- STATIC_ASSERT(kSymbolTag != 0);
- __ testb(scratch, Immediate(kIsSymbolMask));
- __ j(zero, label);
+ __ bind(&miss);
+ GenerateMiss(masm);
}
Register InstanceofStub::right() { return no_reg; }
-int CompareStub::MinorKey() {
- // Encode the three parameters in a unique 16 bit value. To avoid duplicate
- // stubs the never NaN NaN condition is only taken into account if the
- // condition is equals.
- ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
- return ConditionField::encode(static_cast<unsigned>(cc_))
- | RegisterField::encode(false) // lhs_ and rhs_ are not used
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
- | IncludeNumberCompareField::encode(include_number_compare_)
- | IncludeSmiCompareField::encode(include_smi_compare_);
-}
-
-
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-void CompareStub::PrintName(StringStream* stream) {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
- const char* cc_name;
- switch (cc_) {
- case less: cc_name = "LT"; break;
- case greater: cc_name = "GT"; break;
- case less_equal: cc_name = "LE"; break;
- case greater_equal: cc_name = "GE"; break;
- case equal: cc_name = "EQ"; break;
- case not_equal: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
- bool is_equality = cc_ == equal || cc_ == not_equal;
- stream->Add("CompareStub_%s", cc_name);
- if (strict_ && is_equality) stream->Add("_STRICT");
- if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
- if (!include_number_compare_) stream->Add("_NO_NUMBER");
- if (!include_smi_compare_) stream->Add("_NO_SMI");
-}
-
-
// -------------------------------------------------------------------------
// StringCharCodeAtGenerator
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMIS);
+ ASSERT(state_ == CompareIC::SMI);
Label miss;
__ JumpIfNotBothSmi(rdx, rax, &miss, Label::kNear);
void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+ ASSERT(state_ == CompareIC::HEAP_NUMBER);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
- Condition either_smi = masm->CheckEitherSmi(rax, rdx);
- __ j(either_smi, &generic_stub, Label::kNear);
- __ CmpObjectType(rax, HEAP_NUMBER_TYPE, rcx);
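+  // An operand recorded as SMI must still be a Smi; otherwise go to miss.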
+ if (left_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(rdx, &miss);
+ }
+ if (right_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(rax, &miss);
+ }
+
+ // Load left and right operand.
+ Label done, left, left_smi, right_smi;
+ __ JumpIfSmi(rax, &right_smi, Label::kNear);
+ __ CompareMap(rax, masm->isolate()->factory()->heap_number_map(), NULL);
__ j(not_equal, &maybe_undefined1, Label::kNear);
- __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ jmp(&left, Label::kNear);
+ __ bind(&right_smi);
+ __ SmiToInteger32(rcx, rax); // Can't clobber rax yet.
+ __ cvtlsi2sd(xmm1, rcx);
+
+ __ bind(&left);
+ __ JumpIfSmi(rdx, &left_smi, Label::kNear);
+ __ CompareMap(rdx, masm->isolate()->factory()->heap_number_map(), NULL);
__ j(not_equal, &maybe_undefined2, Label::kNear);
-
- // Load left and right operand
__ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ jmp(&done);
+ __ bind(&left_smi);
+ __ SmiToInteger32(rcx, rdx); // Can't clobber rdx yet.
+ __ cvtlsi2sd(xmm0, rcx);
+ __ bind(&done);
// Compare operands
__ ucomisd(xmm0, xmm1);
__ ret(0);
__ bind(&unordered);
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
__ bind(&generic_stub);
+ ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+                     CompareIC::GENERIC);
__ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
__ Cmp(rax, masm->isolate()->factory()->undefined_value());
__ j(not_equal, &miss);
+ __ JumpIfSmi(rdx, &unordered);
__ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
__ j(not_equal, &maybe_undefined2, Label::kNear);
__ jmp(&unordered);
void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SYMBOLS);
+ ASSERT(state_ == CompareIC::SYMBOL);
ASSERT(GetCondition() == equal);
// Registers containing left and right operands respectively.
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRINGS);
+ ASSERT(state_ == CompareIC::STRING);
Label miss;
bool equality = Token::IsEqualityOp(op_);
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECTS);
+ ASSERT(state_ == CompareIC::OBJECT);
Label miss;
Condition either_smi = masm->CheckEitherSmi(rdx, rax);
__ j(either_smi, &miss, Label::kNear);
};
-// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
-enum GenericBinaryFlags {
- NO_GENERIC_BINARY_FLAGS = 0,
- NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
-};
-
-
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op,
};
-class BinaryOpStub: public CodeStub {
- public:
- BinaryOpStub(Token::Value op, OverwriteMode mode)
- : op_(op),
- mode_(mode),
- operands_type_(BinaryOpIC::UNINITIALIZED),
- result_type_(BinaryOpIC::UNINITIALIZED) {
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- BinaryOpStub(
- int key,
- BinaryOpIC::TypeInfo operands_type,
- BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- operands_type_(operands_type),
- result_type_(result_type) { }
-
- private:
- enum SmiCodeGenerateHeapNumberResults {
- ALLOW_HEAPNUMBER_RESULTS,
- NO_HEAPNUMBER_RESULTS
- };
-
- Token::Value op_;
- OverwriteMode mode_;
-
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo operands_type_;
- BinaryOpIC::TypeInfo result_type_;
-
- virtual void PrintName(StringStream* stream);
-
- // Minor key encoding in 15 bits RRRTTTOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 9, 3> {};
- class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 12, 3> {};
-
- Major MajorKey() { return BinaryOp; }
- int MinorKey() {
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | OperandTypeInfoBits::encode(operands_type_)
- | ResultTypeInfoBits::encode(result_type_);
- }
-
- void Generate(MacroAssembler* masm);
- void GenerateGeneric(MacroAssembler* masm);
- void GenerateSmiCode(MacroAssembler* masm,
- Label* slow,
- SmiCodeGenerateHeapNumberResults heapnumber_results);
- void GenerateFloatingPointCode(MacroAssembler* masm,
- Label* allocation_failure,
- Label* non_numeric_failure);
- void GenerateStringAddCode(MacroAssembler* masm);
- void GenerateCallRuntimeCode(MacroAssembler* masm);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateUninitializedStub(MacroAssembler* masm);
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateInt32Stub(MacroAssembler* masm);
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateOddballStub(MacroAssembler* masm);
- void GenerateStringStub(MacroAssembler* masm);
- void GenerateBothStringStub(MacroAssembler* masm);
- void GenerateGenericStub(MacroAssembler* masm);
-
- void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
- void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(operands_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_binary_op_type(operands_type_);
- code->set_binary_op_result_type(result_type_);
- }
-
- friend class CodeGenerator;
-};
-
-
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
SetSourcePosition(expr->position());
// Call stub for +1/-1.
+ __ movq(rdx, rax);
+ __ Move(rax, Smi::FromInt(1));
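+  // rdx is the left operand (the old value); the right operand is always 1.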
BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
- if (expr->op() == Token::INC) {
- __ Move(rdx, Smi::FromInt(1));
- } else {
- __ movq(rdx, rax);
- __ Move(rax, Smi::FromInt(1));
- }
CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
default: {
VisitForAccumulatorValue(expr->right());
- Condition cc = no_condition;
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- cc = equal;
- break;
- case Token::LT:
- cc = less;
- break;
- case Token::GT:
- cc = greater;
- break;
- case Token::LTE:
- cc = less_equal;
- break;
- case Token::GTE:
- cc = greater_equal;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
+ Condition cc = CompareIC::ComputeCondition(op);
__ pop(rdx);
bool inline_smi_code = ShouldInlineSmiCase(op);
}
-static bool HasInlinedSmiCode(Address address) {
+bool CompareIC::HasInlinedSmiCode(Address address) {
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
}
-void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
- HandleScope scope;
- Handle<Code> rewritten;
- State previous_state = GetState();
-
- State state = TargetState(previous_state, HasInlinedSmiCode(address()), x, y);
- if (state == GENERIC) {
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
- rewritten = stub.GetCode();
- } else {
- ICCompareStub stub(op_, state);
- if (state == KNOWN_OBJECTS) {
- stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
- }
- rewritten = stub.GetCode();
- }
- set_target(*rewritten);
-
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[CompareIC (%s->%s)#%s]\n",
- GetStateName(previous_state),
- GetStateName(state),
- Token::Name(op_));
- }
-#endif
-
- // Activate inlined smi code.
- if (previous_state == UNINITIALIZED) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
- }
-}
-
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// The address of the instruction following the call.
Address test_instruction_address =
}
if (emit_instructions) {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+ if (FLAG_code_comments) {
+ HValue* hydrogen = instr->hydrogen_value();
+ if (hydrogen != NULL) {
+ if (hydrogen->IsChange()) {
+ HValue* changed_value = HChange::cast(hydrogen)->value();
+ int use_id = 0;
+ const char* use_mnemo = "dead";
+ if (hydrogen->UseCount() >= 1) {
+ HValue* use_value = hydrogen->uses().value();
+ use_id = use_value->id();
+ use_mnemo = use_value->Mnemonic();
+ }
+ Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
+ current_instruction_, instr->Mnemonic(),
+ changed_value->id(), changed_value->Mnemonic(),
+ use_id, use_mnemo);
+ } else {
+ Comment(";;; @%d: %s. <#%d>", current_instruction_,
+ instr->Mnemonic(), hydrogen->id());
+ }
+ } else {
+ Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+ }
+ }
instr->CompileToNative(this);
}
}
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
HCompareIDAndBranch* instr) {
- Representation r = instr->GetInputRepresentation();
+ Representation r = instr->representation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
env->set_ast_id(instr->ast_id());
env->Drop(instr->pop_count());
- for (int i = 0; i < instr->values()->length(); ++i) {
+ for (int i = instr->values()->length() - 1; i >= 0; --i) {
HValue* value = instr->values()->at(i);
if (instr->HasAssignedIndexAt(i)) {
env->Bind(instr->GetAssignedIndexAt(i), value);
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
+ return hydrogen()->representation().IsDouble();
}
virtual void PrintDataTo(StringStream* stream);
TEST(SetFunctionEntryHook) {
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_use_inlining = false;
// Test setting and resetting the entry hook.
// Nulling it should always succeed.
assertTrue(0 === (Math.floor((zero_in_array[0] | 0) / -1) | 0));
}
+test_div_no_deopt_minus_zero();
test_div_no_deopt_minus_zero();
%OptimizeFunctionOnNextCall(test_div_no_deopt_minus_zero);
test_div_no_deopt_minus_zero();