+2011-09-27 Yuqiang Xian <yuqiang.xian@intel.com>
+
+ Value profiling in baseline JIT for JSVALUE32_64
+ https://bugs.webkit.org/show_bug.cgi?id=68750
+
+ Reviewed by Geoff Garen.
+
+ * jit/JITArithmetic32_64.cpp:
+ (JSC::JIT::emit_op_mul):
+ (JSC::JIT::emit_op_div):
+ * jit/JITCall32_64.cpp:
+ (JSC::JIT::emit_op_call_put_result):
+ * jit/JITOpcodes32_64.cpp:
+ (JSC::JIT::emit_op_resolve):
+ (JSC::JIT::emit_op_resolve_base):
+ (JSC::JIT::emit_op_resolve_skip):
+ (JSC::JIT::emit_op_resolve_global):
+ (JSC::JIT::emitSlow_op_resolve_global):
+ (JSC::JIT::emit_op_resolve_with_base):
+ (JSC::JIT::emit_op_resolve_with_this):
+ * jit/JITPropertyAccess32_64.cpp:
+ (JSC::JIT::emit_op_method_check):
+ (JSC::JIT::emit_op_get_by_val):
+ (JSC::JIT::emitSlow_op_get_by_val):
+ (JSC::JIT::emit_op_get_by_id):
+ (JSC::JIT::emitSlow_op_get_by_id):
+ (JSC::JIT::emit_op_get_scoped_var):
+ (JSC::JIT::emit_op_get_global_var):
+ * jit/JITStubCall.h:
+ (JSC::JITStubCall::callWithValueProfiling):
+
2011-09-28 Yuqiang Xian <yuqiang.xian@intel.com>
Wrong integer checks in JSVALUE32_64 DFG JIT
}
divDouble(fpRegT1, fpRegT0);
-#if ENABLE(DFG_JIT)
+#if ENABLE(VALUE_PROFILER)
// Is the result actually an integer? The DFG JIT would really like to know. If it's
// not an integer, we increment a count. If this together with the slow case counter
// are below threshold then the DFG JIT will compile this division with a specualtion
subDouble(fpRegT0, fpRegT1);
emitStoreDouble(dst, fpRegT1);
break;
- case op_div:
+ case op_div: {
emitLoadDouble(op1, fpRegT1);
divDouble(fpRegT0, fpRegT1);
+
+#if ENABLE(VALUE_PROFILER)
+ // Is the result actually an integer? The DFG JIT would really like to know. If it's
+ // not an integer, we increment a count. If this together with the slow case counter
+ // are below threshold then the DFG JIT will compile this division with a speculation
+ // that the remainder is zero.
+
+ // As well, there are cases where a double result here would cause an important field
+ // in the heap to sometimes have doubles in it, resulting in double predictions getting
+ // propagated to a use site where it might cause damage (such as the index to an array
+ // access). So if we are DFG compiling anything in the program, we want this code to
+ // ensure that it produces integers whenever possible.
+
+ // FIXME: This will fail to convert to integer if the result is zero. We should
+ // distinguish between positive zero and negative zero here.
+
+ JumpList notInteger;
+ branchConvertDoubleToInt32(fpRegT1, regT2, notInteger, fpRegT0);
+ // If we've got an integer, we might as well make that the result of the division.
+ emitStoreInt32(dst, regT2);
+ Jump isInteger = jump();
+ notInteger.link(this);
+ add32(Imm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
+ emitStoreDouble(dst, fpRegT1);
+ isInteger.link(this);
+#else
emitStoreDouble(dst, fpRegT1);
+#endif
break;
+ }
case op_jless:
emitLoadDouble(op1, fpRegT2);
addJump(branchDouble(DoubleLessThan, fpRegT2, fpRegT0), dst);
subDouble(fpRegT2, fpRegT0);
emitStoreDouble(dst, fpRegT0);
break;
- case op_div:
+ case op_div: {
emitLoadDouble(op2, fpRegT2);
divDouble(fpRegT2, fpRegT0);
+#if ENABLE(VALUE_PROFILER)
+ // Is the result actually an integer? The DFG JIT would really like to know. If it's
+ // not an integer, we increment a count. If this together with the slow case counter
+ // are below threshold then the DFG JIT will compile this division with a speculation
+ // that the remainder is zero.
+
+ // As well, there are cases where a double result here would cause an important field
+ // in the heap to sometimes have doubles in it, resulting in double predictions getting
+ // propagated to a use site where it might cause damage (such as the index to an array
+ // access). So if we are DFG compiling anything in the program, we want this code to
+ // ensure that it produces integers whenever possible.
+
+ // FIXME: This will fail to convert to integer if the result is zero. We should
+ // distinguish between positive zero and negative zero here.
+
+ JumpList notInteger;
+ branchConvertDoubleToInt32(fpRegT0, regT2, notInteger, fpRegT1);
+ // If we've got an integer, we might as well make that the result of the division.
+ emitStoreInt32(dst, regT2);
+ Jump isInteger = jump();
+ notInteger.link(this);
+ add32(Imm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
+ emitStoreDouble(dst, fpRegT0);
+ isInteger.link(this);
+#else
emitStoreDouble(dst, fpRegT0);
+#endif
break;
+ }
case op_jless:
emitLoadDouble(op2, fpRegT1);
addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), dst);
unsigned op2 = currentInstruction[3].u.operand;
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+#if ENABLE(VALUE_PROFILER)
+ m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
+#endif
+
JumpList notInt32Op1;
JumpList notInt32Op2;
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));
negZero.link(this);
+#if ENABLE(VALUE_PROFILER)
+ // We only get here if we have a genuine negative zero. Record this,
+ // so that the speculative JIT knows that we failed speculation
+ // because of a negative zero.
+ add32(Imm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
+#endif
overflow.link(this);
if (!supportsFloatingPoint()) {
unsigned op2 = currentInstruction[3].u.operand;
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+#if ENABLE(VALUE_PROFILER)
+ m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
+#endif
+
if (!supportsFloatingPoint()) {
addSlowCase(jump());
return;
void JIT::emit_op_call_put_result(Instruction* instruction)
{
int dst = instruction[1].u.operand;
+ emitValueProfilingSite(FirstProfilingSite);
emitStore(dst, regT1, regT0);
}
{
JITStubCall stubCall(this, cti_op_resolve);
stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
+ stubCall.callWithValueProfiling(currentInstruction[1].u.operand, FirstProfilingSite);
}
void JIT::emit_op_to_primitive(Instruction* currentInstruction)
{
JITStubCall stubCall(this, currentInstruction[3].u.operand ? cti_op_resolve_base_strict_put : cti_op_resolve_base);
stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
+ stubCall.callWithValueProfiling(currentInstruction[1].u.operand, FirstProfilingSite);
}
void JIT::emit_op_ensure_property_exists(Instruction* currentInstruction)
JITStubCall stubCall(this, cti_op_resolve_skip);
stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
- stubCall.call(currentInstruction[1].u.operand);
+ stubCall.callWithValueProfiling(currentInstruction[1].u.operand, FirstProfilingSite);
}
void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool dynamic)
load32(Address(regT3, OBJECT_OFFSETOF(GlobalResolveInfo, offset)), regT3);
load32(BaseIndex(regT2, regT3, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
load32(BaseIndex(regT2, regT3, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
+ emitValueProfilingSite(FirstProfilingSite);
emitStore(dst, regT1, regT0);
map(m_bytecodeOffset + (dynamic ? OPCODE_LENGTH(op_resolve_global_dynamic) : OPCODE_LENGTH(op_resolve_global)), dst, regT1, regT0);
}
JITStubCall stubCall(this, cti_op_resolve_global);
stubCall.addArgument(TrustedImmPtr(ident));
stubCall.addArgument(Imm32(currentIndex));
- stubCall.call(dst);
+ stubCall.callWithValueProfiling(dst, SubsequentProfilingSite);
}
void JIT::emit_op_not(Instruction* currentInstruction)
JITStubCall stubCall(this, cti_op_resolve_with_base);
stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
- stubCall.call(currentInstruction[2].u.operand);
+ stubCall.callWithValueProfiling(currentInstruction[2].u.operand, FirstProfilingSite);
}
void JIT::emit_op_resolve_with_this(Instruction* currentInstruction)
JITStubCall stubCall(this, cti_op_resolve_with_this);
stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
- stubCall.call(currentInstruction[2].u.operand);
+ stubCall.callWithValueProfiling(currentInstruction[2].u.operand, FirstProfilingSite);
}
void JIT::emit_op_throw(Instruction* currentInstruction)
compileGetByIdHotPath();
match.link(this);
+ emitValueProfilingSite(FirstProfilingSite);
emitStore(dst, regT1, regT0);
map(m_bytecodeOffset + OPCODE_LENGTH(op_method_check), dst, regT1, regT0);
load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
+ emitValueProfilingSite(FirstProfilingSite);
emitStore(dst, regT1, regT0);
map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
}
stubCall.addArgument(base);
stubCall.addArgument(property);
stubCall.call(dst);
+
+ emitValueProfilingSite(SubsequentProfilingSite);
}
void JIT::emit_op_put_by_val(Instruction* currentInstruction)
emitLoad(base, regT1, regT0);
emitJumpSlowCaseIfNotJSCell(base, regT1);
compileGetByIdHotPath();
+ emitValueProfilingSite(FirstProfilingSite);
emitStore(dst, regT1, regT0);
map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
}
int ident = currentInstruction[3].u.operand;
compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter);
+ emitValueProfilingSite(SubsequentProfilingSite);
}
void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
loadPtr(Address(regT2, JSVariableObject::offsetOfRegisters()), regT2);
emitLoad(index, regT1, regT0, regT2);
+ emitValueProfilingSite(FirstProfilingSite);
emitStore(dst, regT1, regT0);
map(m_bytecodeOffset + OPCODE_LENGTH(op_get_scoped_var), dst, regT1, regT0);
}
loadPtr(&globalObject->m_registers, regT2);
emitLoad(index, regT1, regT0, regT2);
+ emitValueProfilingSite(FirstProfilingSite);
emitStore(dst, regT1, regT0);
map(m_bytecodeOffset + OPCODE_LENGTH(op_get_global_var), dst, regT1, regT0);
}
return call;
}
- JIT::Call callWithValueProfiling(unsigned dst, JIT::ValueProfilingSiteKind)
+ JIT::Call callWithValueProfiling(unsigned dst, JIT::ValueProfilingSiteKind kind)
{
- return call(dst);
+ ASSERT(m_returnType == Value || m_returnType == Cell);
+ JIT::Call call = this->call();
+ m_jit->emitValueProfilingSite(kind);
+ if (m_returnType == Value)
+ m_jit->emitStore(dst, JIT::regT1, JIT::regT0);
+ else
+ m_jit->emitStoreCell(dst, JIT::returnValueRegister);
+ return call;
}
#else
JIT::Call call(unsigned dst) // dst is a virtual register.