// NOTE(review): this span is a unified-diff fragment ('+' = added line,
// '-' = removed line, unprefixed = context) against V8's ARM stub code
// generator. The enclosing function starts before this excerpt, so every
// original line — including the diff markers — is preserved byte-identical.
bool generate_code_to_calculate_answer = true;
// Only taken when this stub is meant to emit floating-point code.
if (ShouldGenerateFPCode()) {
+ // DIV has neither SmiSmi fast code nor specialized slow code.
+ // So don't try to patch a DIV Stub.
if (runtime_operands_type_ == BinaryOpIC::DEFAULT) {
// The patch moves DIV out of the type-transition group below: ADD/SUB/MUL
// still tail-call GenerateTypeTransition so the IC can be re-patched with
// collected type feedback, while DIV now deliberately falls through.
switch (op_) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
- case Token::DIV:
GenerateTypeTransition(masm); // Tail call.
generate_code_to_calculate_answer = false;
break;
+ case Token::DIV:
+ // DIV has neither SmiSmi fast code nor specialized slow code.
+ // So don't try to patch a DIV Stub.
+ break;
+
default:
break;
}
// HEAP_NUMBERS stub is slower than GENERIC on a pair of smis.
// r0 is known to be a smi. If r1 is also a smi then switch to GENERIC.
Label r1_is_not_smi;
// The patch guards the smi-smi downgrade with HasSmiSmiFastPath(): a DIV
// stub has no smi-smi fast path, so transitioning it would be pointless
// (see the class-body hunk later in this patch for the helper).
- if (runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) {
+ if ((runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) &&
+ HasSmiSmiFastPath()) {
// Test the smi tag bit of r1; branch away if r1 is not a smi.
__ tst(r1, Operand(kSmiTagMask));
__ b(ne, &r1_is_not_smi);
GenerateTypeTransition(masm); // Tail call.
// NOTE(review): diff fragment continues inside the GenericBinaryOpStub class
// body. The constructor head and the class declaration itself are outside
// this view; lines are kept byte-identical, comments only are added.
rhs_(rhs),
constant_rhs_(constant_rhs),
specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
// Patch changes the constructor's initial IC state from DEFAULT to
// UNINIT_OR_SMI — presumably so a freshly-built stub records that no
// non-smi operand has been seen yet; TODO(review): confirm the new enum
// value exists in BinaryOpIC::TypeInfo in the corresponding header hunk.
- runtime_operands_type_(BinaryOpIC::DEFAULT),
+ runtime_operands_type_(BinaryOpIC::UNINIT_OR_SMI),
name_(NULL) { }
GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
return lhs_is_r0 ? r1 : r0;
}
// New helper added by this patch: true for every op except DIV, matching
// the Generate()-side rule that DIV has no smi-smi fast case to patch to.
+ bool HasSmiSmiFastPath() {
+ return op_ != Token::DIV;
+ }
+
// Truncated here: the condition continues past the end of this excerpt.
bool ShouldGenerateSmiCode() {
return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&