/*
 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"

#if ENABLE(JIT)
#include "JIT.h"

// This probably does not belong here; adding here for now as a quick Windows build fix.
#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X)
#include "MacroAssembler.h"
JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
#endif

#include "CodeBlock.h"
#include "DFGNode.h" // for DFG_SUCCESS_STATS
#include "Interpreter.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
#include <wtf/CryptographicallyRandomNumber.h>

namespace JSC {
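
// The ctiPatch* helpers below take the return address of a call embedded in
// JIT-generated code and re-point that call at a new target. This is the
// mechanism by which inline caches and other patched fast paths are redirected
// to trampolines or runtime functions after code has been generated.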
void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
}
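
// Note that m_bytecodeOffset (and the JSVALUE32_64 mapping fields) are
// initialized to (unsigned)-1 as a deliberate sentinel: code that emits
// bytecode-relative data asserts against this value to catch emission
// happening outside of a compilation pass.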
JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
    : m_interpreter(globalData->interpreter)
    , m_globalData(globalData)
    , m_codeBlock(codeBlock)
    , m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
    , m_bytecodeOffset((unsigned)-1)
#if USE(JSVALUE32_64)
    , m_jumpTargetIndex(0)
    , m_mappedBytecodeOffset((unsigned)-1)
    , m_mappedVirtualRegisterIndex(RegisterFile::ReturnPC)
    , m_mappedTag((RegisterID)-1)
    , m_mappedPayload((RegisterID)-1)
#else
    , m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
    , m_jumpTargetsPosition(0)
#endif
#if USE(OS_RANDOMNESS)
    , m_randomGenerator(cryptographicallyRandomNumber())
#else
    , m_randomGenerator(static_cast<unsigned>(randomNumber() * 0xFFFFFFF))
#endif
{
}
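
// Emits the tier-up check that drives tiered compilation. The code block's
// execution counter starts out negative and is incremented at every loop
// back-edge or return; once it crosses zero, the generated code calls into
// the runtime (cti_optimize_from_loop / cti_optimize_from_ret) to consider
// recompiling this code block with the optimizing DFG JIT.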
#if ENABLE(DFG_JIT)
void JIT::emitOptimizationCheck(OptimizationCheckKind kind)
{
    if (!canBeOptimized())
        return;

    Jump skipOptimize = branchAdd32(Signed, TrustedImm32(kind == LoopOptimizationCheck ? Options::executionCounterIncrementForLoop : Options::executionCounterIncrementForReturn), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter()));
    JITStubCall stubCall(this, kind == LoopOptimizationCheck ? cti_optimize_from_loop : cti_optimize_from_ret);
    if (kind == LoopOptimizationCheck)
        stubCall.addArgument(TrustedImm32(m_bytecodeOffset));
    stubCall.call();
    skipOptimize.link(this);
}
#endif
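
// Three configuration-specific versions of the watchdog check follow. Each
// decrements a timeout counter and, when it reaches zero, calls the
// cti_timeout_check stub, which gives the host a chance to terminate a
// runaway script and returns a refreshed counter value.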
#if CPU(X86)
void JIT::emitTimeoutCheck()
{
    Jump skipTimeout = branchSub32(NonZero, TrustedImm32(1), AbsoluteAddress(&m_globalData->m_timeoutCount));
    JITStubCall stubCall(this, cti_timeout_check);
    stubCall.addArgument(regT1, regT0); // save last result registers.
    stubCall.call(regT0);
    store32(regT0, &m_globalData->m_timeoutCount);
    stubCall.getArgument(0, regT1, regT0); // reload last result registers.
    skipTimeout.link(this);
}
#elif USE(JSVALUE32_64)
void JIT::emitTimeoutCheck()
{
    Jump skipTimeout = branchSub32(NonZero, TrustedImm32(1), timeoutCheckRegister);
    JITStubCall stubCall(this, cti_timeout_check);
    stubCall.addArgument(regT1, regT0); // save last result registers.
    stubCall.call(timeoutCheckRegister);
    stubCall.getArgument(0, regT1, regT0); // reload last result registers.
    skipTimeout.link(this);
}
#else
void JIT::emitTimeoutCheck()
{
    Jump skipTimeout = branchSub32(NonZero, TrustedImm32(1), timeoutCheckRegister);
    JITStubCall(this, cti_timeout_check).call(timeoutCheckRegister);
    skipTimeout.link(this);

    killLastResultRegister();
}
#endif
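
// The macros below stamp out the per-opcode cases for the compilation
// switches. DEFINE_BINARY_OP / DEFINE_UNARY_OP emit a plain stub call for
// opcodes with no fast path; DEFINE_OP dispatches to the opcode's
// emit_<name> method, and DEFINE_SLOWCASE_OP to its emitSlow_<name> method.
// NEXT_OPCODE advances m_bytecodeOffset past the current opcode and breaks
// out of the switch.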
#define NEXT_OPCODE(name) \
    m_bytecodeOffset += OPCODE_LENGTH(name); \
    break;

#if USE(JSVALUE32_64)
#define DEFINE_BINARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand); \
        stubCall.addArgument(currentInstruction[3].u.operand); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_UNARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#else // USE(JSVALUE32_64)

#define DEFINE_BINARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
        stubCall.addArgument(currentInstruction[3].u.operand, regT2); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_UNARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }
#endif // USE(JSVALUE32_64)

#define DEFINE_OP(name) \
    case name: { \
        emit_##name(currentInstruction); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_SLOWCASE_OP(name) \
    case name: { \
        emitSlow_##name(currentInstruction, iter); \
        NEXT_OPCODE(name); \
    }
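
// First of the three compilation passes: walk the bytecode in order, record
// a label at every instruction boundary (jump targets are resolved against
// these labels in the link pass), and emit fast-path code for each opcode.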
void JIT::privateCompileMainPass()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
    unsigned instructionCount = m_codeBlock->instructions().size();

    m_globalResolveInfoIndex = 0;
    m_callLinkInfoIndex = 0;

    for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
        ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);

#if ENABLE(OPCODE_SAMPLING)
        if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
            sampleInstruction(currentInstruction);
#endif

#if USE(JSVALUE64)
        if (m_labels[m_bytecodeOffset].isUsed())
            killLastResultRegister();
#endif

        m_labels[m_bytecodeOffset] = label();

#if ENABLE(JIT_VERBOSE)
        dataLog("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_BINARY_OP(op_del_by_val)
        DEFINE_BINARY_OP(op_in)
        DEFINE_BINARY_OP(op_less)
        DEFINE_BINARY_OP(op_lesseq)
        DEFINE_BINARY_OP(op_greater)
        DEFINE_BINARY_OP(op_greatereq)
        DEFINE_UNARY_OP(op_is_function)
        DEFINE_UNARY_OP(op_is_object)
        DEFINE_UNARY_OP(op_typeof)

        DEFINE_OP(op_call_eval)
        DEFINE_OP(op_call_varargs)
        DEFINE_OP(op_construct)
        DEFINE_OP(op_create_this)
        DEFINE_OP(op_convert_this)
        DEFINE_OP(op_init_lazy_reg)
        DEFINE_OP(op_create_arguments)
        DEFINE_OP(op_del_by_id)
        DEFINE_OP(op_create_activation)
        DEFINE_OP(op_eq_null)
        DEFINE_OP(op_get_by_id)
        DEFINE_OP(op_get_arguments_length)
        DEFINE_OP(op_get_by_val)
        DEFINE_OP(op_get_argument_by_val)
        DEFINE_OP(op_get_by_pname)
        DEFINE_OP(op_get_global_var)
        DEFINE_OP(op_get_pnames)
        DEFINE_OP(op_get_scoped_var)
        DEFINE_OP(op_check_has_instance)
        DEFINE_OP(op_instanceof)
        DEFINE_OP(op_is_undefined)
        DEFINE_OP(op_is_boolean)
        DEFINE_OP(op_is_number)
        DEFINE_OP(op_is_string)
        DEFINE_OP(op_jeq_null)
        DEFINE_OP(op_jmp_scopes)
        DEFINE_OP(op_jneq_null)
        DEFINE_OP(op_jneq_ptr)
        DEFINE_OP(op_jlesseq)
        DEFINE_OP(op_jgreater)
        DEFINE_OP(op_jgreatereq)
        DEFINE_OP(op_jnlesseq)
        DEFINE_OP(op_jngreater)
        DEFINE_OP(op_jngreatereq)
        DEFINE_OP(op_loop_hint)
        DEFINE_OP(op_loop_if_less)
        DEFINE_OP(op_loop_if_lesseq)
        DEFINE_OP(op_loop_if_greater)
        DEFINE_OP(op_loop_if_greatereq)
        DEFINE_OP(op_loop_if_true)
        DEFINE_OP(op_loop_if_false)
        DEFINE_OP(op_method_check)
        DEFINE_OP(op_neq_null)
        DEFINE_OP(op_new_array)
        DEFINE_OP(op_new_array_buffer)
        DEFINE_OP(op_new_func)
        DEFINE_OP(op_new_func_exp)
        DEFINE_OP(op_new_object)
        DEFINE_OP(op_new_regexp)
        DEFINE_OP(op_next_pname)
        DEFINE_OP(op_nstricteq)
        DEFINE_OP(op_pop_scope)
        DEFINE_OP(op_post_dec)
        DEFINE_OP(op_post_inc)
        DEFINE_OP(op_pre_dec)
        DEFINE_OP(op_pre_inc)
        DEFINE_OP(op_profile_did_call)
        DEFINE_OP(op_profile_will_call)
        DEFINE_OP(op_push_new_scope)
        DEFINE_OP(op_push_scope)
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        DEFINE_OP(op_put_by_id)
        DEFINE_OP(op_put_by_index)
        DEFINE_OP(op_put_by_val)
        DEFINE_OP(op_put_getter_setter)
        DEFINE_OP(op_put_global_var)
        DEFINE_OP(op_put_scoped_var)
        DEFINE_OP(op_resolve)
        DEFINE_OP(op_resolve_base)
        DEFINE_OP(op_ensure_property_exists)
        DEFINE_OP(op_resolve_global)
        DEFINE_OP(op_resolve_global_dynamic)
        DEFINE_OP(op_resolve_skip)
        DEFINE_OP(op_resolve_with_base)
        DEFINE_OP(op_resolve_with_this)
        DEFINE_OP(op_call_put_result)
        DEFINE_OP(op_ret_object_or_this)
        DEFINE_OP(op_urshift)
        DEFINE_OP(op_stricteq)
        DEFINE_OP(op_switch_char)
        DEFINE_OP(op_switch_imm)
        DEFINE_OP(op_switch_string)
        DEFINE_OP(op_tear_off_activation)
        DEFINE_OP(op_tear_off_arguments)
        DEFINE_OP(op_throw_reference_error)
        DEFINE_OP(op_to_jsnumber)
        DEFINE_OP(op_to_primitive)

        case op_get_array_length:
        case op_get_by_id_chain:
        case op_get_by_id_generic:
        case op_get_by_id_proto:
        case op_get_by_id_self:
        case op_get_by_id_getter_chain:
        case op_get_by_id_getter_proto:
        case op_get_by_id_getter_self:
        case op_get_by_id_custom_chain:
        case op_get_by_id_custom_proto:
        case op_get_by_id_custom_self:
        case op_get_string_length:
        case op_put_by_id_generic:
        case op_put_by_id_replace:
        case op_put_by_id_transition:
            ASSERT_NOT_REACHED();
        }
    }

    ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
#endif
}
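
// Second pass: every bytecode offset now has a label, so the jumps recorded
// in m_jmpTable during the main pass can be bound to their target labels.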
void JIT::privateCompileLinkPass()
{
    unsigned jmpTableCount = m_jmpTable.size();
    for (unsigned i = 0; i < jmpTableCount; ++i)
        m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
    m_jmpTable.clear();
}
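
// Third pass: emit the out-of-line slow paths. Each fast path that can bail
// out appended a SlowCaseEntry; the entries for one instruction are linked
// here, fallback code is emitted, and emitJumpSlowToHot() then routes
// control back to the instruction's fast-path continuation.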
void JIT::privateCompileSlowCases()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();

    m_propertyAccessInstructionIndex = 0;
    m_globalResolveInfoIndex = 0;
    m_callLinkInfoIndex = 0;

#if !ASSERT_DISABLED && ENABLE(VALUE_PROFILER)
    // Use this to assert that slow-path code associates new profiling sites with existing
    // ValueProfiles rather than creating new ones. This ensures that for a given instruction
    // (say, get_by_id) we get combined statistics for both the fast-path and the slow-path
    // executions of that instruction. Furthermore, if the slow-path code created new
    // ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset,
    // which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset().
    unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles();
#endif

    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
#if USE(JSVALUE64)
        killLastResultRegister();
#endif

        m_bytecodeOffset = iter->to;

        unsigned firstTo = m_bytecodeOffset;

        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;

#if ENABLE(VALUE_PROFILER)
        RareCaseProfile* rareCaseProfile = 0;
        if (shouldEmitProfiling())
            rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset);
#endif

#if ENABLE(JIT_VERBOSE)
        dataLog("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_SLOWCASE_OP(op_add)
        DEFINE_SLOWCASE_OP(op_bitand)
        DEFINE_SLOWCASE_OP(op_bitor)
        DEFINE_SLOWCASE_OP(op_bitxor)
        DEFINE_SLOWCASE_OP(op_call)
        DEFINE_SLOWCASE_OP(op_call_eval)
        DEFINE_SLOWCASE_OP(op_call_varargs)
        DEFINE_SLOWCASE_OP(op_construct)
        DEFINE_SLOWCASE_OP(op_convert_this)
        DEFINE_SLOWCASE_OP(op_create_this)
        DEFINE_SLOWCASE_OP(op_div)
        DEFINE_SLOWCASE_OP(op_eq)
        DEFINE_SLOWCASE_OP(op_get_by_id)
        DEFINE_SLOWCASE_OP(op_get_arguments_length)
        DEFINE_SLOWCASE_OP(op_get_by_val)
        DEFINE_SLOWCASE_OP(op_get_argument_by_val)
        DEFINE_SLOWCASE_OP(op_get_by_pname)
        DEFINE_SLOWCASE_OP(op_check_has_instance)
        DEFINE_SLOWCASE_OP(op_instanceof)
        DEFINE_SLOWCASE_OP(op_jfalse)
        DEFINE_SLOWCASE_OP(op_jless)
        DEFINE_SLOWCASE_OP(op_jlesseq)
        DEFINE_SLOWCASE_OP(op_jgreater)
        DEFINE_SLOWCASE_OP(op_jgreatereq)
        DEFINE_SLOWCASE_OP(op_jnless)
        DEFINE_SLOWCASE_OP(op_jnlesseq)
        DEFINE_SLOWCASE_OP(op_jngreater)
        DEFINE_SLOWCASE_OP(op_jngreatereq)
        DEFINE_SLOWCASE_OP(op_jtrue)
        DEFINE_SLOWCASE_OP(op_loop_if_less)
        DEFINE_SLOWCASE_OP(op_loop_if_lesseq)
        DEFINE_SLOWCASE_OP(op_loop_if_greater)
        DEFINE_SLOWCASE_OP(op_loop_if_greatereq)
        DEFINE_SLOWCASE_OP(op_loop_if_true)
        DEFINE_SLOWCASE_OP(op_loop_if_false)
        DEFINE_SLOWCASE_OP(op_lshift)
        DEFINE_SLOWCASE_OP(op_method_check)
        DEFINE_SLOWCASE_OP(op_mod)
        DEFINE_SLOWCASE_OP(op_mul)
        DEFINE_SLOWCASE_OP(op_negate)
        DEFINE_SLOWCASE_OP(op_neq)
        DEFINE_SLOWCASE_OP(op_new_array)
        DEFINE_SLOWCASE_OP(op_new_object)
        DEFINE_SLOWCASE_OP(op_new_func)
        DEFINE_SLOWCASE_OP(op_new_func_exp)
        DEFINE_SLOWCASE_OP(op_not)
        DEFINE_SLOWCASE_OP(op_nstricteq)
        DEFINE_SLOWCASE_OP(op_post_dec)
        DEFINE_SLOWCASE_OP(op_post_inc)
        DEFINE_SLOWCASE_OP(op_pre_dec)
        DEFINE_SLOWCASE_OP(op_pre_inc)
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        DEFINE_SLOWCASE_OP(op_put_by_id)
        DEFINE_SLOWCASE_OP(op_put_by_val)
        DEFINE_SLOWCASE_OP(op_resolve_global)
        DEFINE_SLOWCASE_OP(op_resolve_global_dynamic)
        DEFINE_SLOWCASE_OP(op_rshift)
        DEFINE_SLOWCASE_OP(op_urshift)
        DEFINE_SLOWCASE_OP(op_stricteq)
        DEFINE_SLOWCASE_OP(op_sub)
        DEFINE_SLOWCASE_OP(op_to_jsnumber)
        DEFINE_SLOWCASE_OP(op_to_primitive)
        default:
            ASSERT_NOT_REACHED();
        }

        ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
        ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");

#if ENABLE(VALUE_PROFILER)
        if (shouldEmitProfiling())
            add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));
#endif

        emitJumpSlowToHot(jump(), 0);
    }

    ASSERT(m_propertyAccessInstructionIndex == m_propertyAccessCompilationInfo.size());
    ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());
#if ENABLE(VALUE_PROFILER)
    ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());
#endif

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
#endif
}
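
// Transfers the labels recorded while compiling one property-access site into
// the CodeBlock's StructureStubInfo, as offsets relative to the start of the
// fast path (or to other labels), so the inline cache can later be patched
// without recompiling.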
ALWAYS_INLINE void PropertyStubCompilationInfo::copyToStubInfo(StructureStubInfo& info, LinkBuffer& linkBuffer)
{
    ASSERT(bytecodeIndex != std::numeric_limits<unsigned>::max());
    info.bytecodeIndex = bytecodeIndex;
    info.callReturnLocation = linkBuffer.locationOf(callReturnLocation);
    info.hotPathBegin = linkBuffer.locationOf(hotPathBegin);

    switch (m_type) {
    case MethodCheck: {
        CodeLocationDataLabelPtr structureToCompareLocation = linkBuffer.locationOf(methodCheckStructureToCompare);
        info.patch.baseline.methodCheckProtoObj = MacroAssembler::differenceBetweenCodePtr(structureToCompareLocation, linkBuffer.locationOf(methodCheckProtoObj));
        info.patch.baseline.methodCheckProtoStructureToCompare = MacroAssembler::differenceBetweenCodePtr(structureToCompareLocation, linkBuffer.locationOf(methodCheckProtoStructureToCompare));
        info.patch.baseline.methodCheckPutFunction = MacroAssembler::differenceBetweenCodePtr(structureToCompareLocation, linkBuffer.locationOf(methodCheckPutFunction));
        // No break - fall through to GetById.
    }
    case GetById: {
        CodeLocationLabel hotPathBeginLocation = linkBuffer.locationOf(hotPathBegin);
        info.patch.baseline.u.get.structureToCompare = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getStructureToCompare));
        info.patch.baseline.u.get.structureCheck = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getStructureCheck));
#if USE(JSVALUE64)
        info.patch.baseline.u.get.displacementLabel = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getDisplacementLabel));
#else
        info.patch.baseline.u.get.displacementLabel1 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getDisplacementLabel1));
        info.patch.baseline.u.get.displacementLabel2 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getDisplacementLabel2));
#endif
        info.patch.baseline.u.get.putResult = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getPutResult));
        info.patch.baseline.u.get.coldPathBegin = MacroAssembler::differenceBetweenCodePtr(linkBuffer.locationOf(getColdPathBegin), linkBuffer.locationOf(callReturnLocation));
        break;
    }
    case PutById: {
        CodeLocationLabel hotPathBeginLocation = linkBuffer.locationOf(hotPathBegin);
        info.patch.baseline.u.put.structureToCompare = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putStructureToCompare));
#if USE(JSVALUE64)
        info.patch.baseline.u.put.displacementLabel = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putDisplacementLabel));
#else
        info.patch.baseline.u.put.displacementLabel1 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putDisplacementLabel1));
        info.patch.baseline.u.put.displacementLabel2 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putDisplacementLabel2));
#endif
        break;
    }
    }
}
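
// Top-level baseline compilation driver. It first asks the DFG what it could
// do with this code block and sets the profiling flags accordingly, emits the
// function prologue (register file and arity checks for function code), runs
// the three passes above, then links the code and copies the recorded patch
// points into the CodeBlock.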
JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffort effort)
{
#if ENABLE(JIT_VERBOSE_OSR)
    printf("Compiling JIT code!\n");
#endif

#if ENABLE(VALUE_PROFILER)
    DFG::CapabilityLevel level = m_codeBlock->canCompileWithDFG();
    switch (level) {
    case DFG::CannotCompile:
        m_canBeOptimized = false;
        m_shouldEmitProfiling = false;
        break;
    case DFG::MayInline:
        m_canBeOptimized = false;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    case DFG::CanCompile:
        m_canBeOptimized = true;
        m_canBeOptimizedOrInlined = true;
        m_shouldEmitProfiling = true;
        break;
    default:
        ASSERT_NOT_REACHED();
        break;
    }
#endif
    // Just add a little bit of randomness to the codegen.
    if (m_randomGenerator.getUint32() & 1)
        nop();

    preserveReturnAddressAfterCall(regT2);
    emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
    emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);

    Label beginLabel(this);

    sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
    sampleInstruction(m_codeBlock->instructions().begin());
#endif

    Jump registerFileCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
#if DFG_ENABLE(SUCCESS_STATS)
        static SamplingCounter counter("originalJIT");
        emitCount(counter);
#endif

#if ENABLE(VALUE_PROFILER)
        ASSERT(m_bytecodeOffset == (unsigned)-1);
        if (shouldEmitProfiling()) {
            for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
                // If this is a constructor, then we want to put in a dummy profiling site (to
                // keep things consistent) but we don't actually want to record the dummy value.
                if (m_codeBlock->m_isConstructor && !argument)
                    continue;
                int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
#if USE(JSVALUE64)
                loadPtr(Address(callFrameRegister, offset), regT0);
#elif USE(JSVALUE32_64)
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
                load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#endif
                emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument));
            }
        }
#endif

        addPtr(TrustedImm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
        registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), regT1);
    }

    Label functionBody = label();

#if ENABLE(VALUE_PROFILER)
    if (canBeOptimized())
        add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->m_executionEntryCount));
#endif

    privateCompileMainPass();
    privateCompileLinkPass();
    privateCompileSlowCases();

    Label arityCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
        registerFileCheck.link(this);
        m_bytecodeOffset = 0;
        JITStubCall(this, cti_register_file_check).call();
#ifndef NDEBUG
        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif
        jump(functionBody);

        arityCheck = label();
        preserveReturnAddressAfterCall(regT2);
        emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
        emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);

        load32(payloadFor(RegisterFile::ArgumentCount), regT1);
        branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);

        m_bytecodeOffset = 0;
        JITStubCall(this, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck).call(callFrameRegister);
#ifndef NDEBUG
        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif

        jump(beginLabel);
    }
    ASSERT(m_jmpTable.isEmpty());

    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock, effort);
    if (patchBuffer.didFailToAllocate())
        return JITCode();

    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
    for (unsigned i = 0; i < m_switches.size(); ++i) {
        SwitchRecord record = m_switches[i];
        unsigned bytecodeOffset = record.bytecodeOffset;

        if (record.type != SwitchRecord::String) {
            ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
            ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());

            record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
                unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
                record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
            }
        } else {
            ASSERT(record.type == SwitchRecord::String);

            record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
            for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
                unsigned offset = it->second.branchOffset;
                it->second.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
            }
        }
    }

    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
        handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
    }

    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
        if (iter->to)
            patchBuffer.link(iter->from, FunctionPtr(iter->to));
    }

    if (m_codeBlock->needsCallReturnIndices()) {
        m_codeBlock->callReturnIndexVector().reserveCapacity(m_calls.size());
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter)
            m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(patchBuffer.returnAddressOffset(iter->from), iter->bytecodeOffset));
    }

    m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccessCompilationInfo.size());
    for (unsigned i = 0; i < m_propertyAccessCompilationInfo.size(); ++i)
        m_propertyAccessCompilationInfo[i].copyToStubInfo(m_codeBlock->structureStubInfo(i), patchBuffer);
    m_codeBlock->setNumberOfCallLinkInfos(m_callStructureStubCompilationInfo.size());
    for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
        info.callType = m_callStructureStubCompilationInfo[i].callType;
        info.bytecodeIndex = m_callStructureStubCompilationInfo[i].bytecodeIndex;
        info.callReturnLocation = CodeLocationLabel(patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation));
        info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
        info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
    }
    unsigned methodCallCount = m_methodCallCompilationInfo.size();
    m_codeBlock->addMethodCallLinkInfos(methodCallCount);
    for (unsigned i = 0; i < methodCallCount; ++i) {
        MethodCallLinkInfo& info = m_codeBlock->methodCallLinkInfo(i);
        info.bytecodeIndex = m_methodCallCompilationInfo[i].bytecodeIndex;
        info.cachedStructure.setLocation(patchBuffer.locationOf(m_methodCallCompilationInfo[i].structureToCompare));
        info.callReturnLocation = m_codeBlock->structureStubInfo(m_methodCallCompilationInfo[i].propertyAccessIndex).callReturnLocation;
    }

#if ENABLE(DFG_JIT) || ENABLE(LLINT)
    if (canBeOptimizedOrInlined()
#if ENABLE(LLINT)
        || true
#endif
        ) {
        CompactJITCodeMap::Encoder jitCodeMapEncoder;
        for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
            if (m_labels[bytecodeOffset].isSet())
                jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
        }
        m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());
    }
#endif

    if (m_codeBlock->codeType() == FunctionCode && functionEntryArityCheck)
        *functionEntryArityCheck = patchBuffer.locationOf(arityCheck);

    CodeRef result = patchBuffer.finalizeCode();

    m_globalData->machineCodeBytesPerBytecodeWordForBaselineJIT.add(
        static_cast<double>(result.size()) /
        static_cast<double>(m_codeBlock->instructions().size()));

    m_codeBlock->shrinkToFit(CodeBlock::LateShrink);

#if ENABLE(JIT_VERBOSE)
    dataLog("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
#endif

    return JITCode(result, JITCode::BaselineJIT);
}
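
// Called from the call-linking slow path once the callee is known: records
// the callee in the CallLinkInfo, repatches the near call on the hot path to
// jump straight to the callee's entry point, and re-points the slow path at
// the generic virtual call/construct thunk so that linking is not attempted
// again for this site.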
void JIT::linkFor(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, JSGlobalData* globalData, CodeSpecializationKind kind)
{
    RepatchBuffer repatchBuffer(callerCodeBlock);

    ASSERT(!callLinkInfo->isLinked());
    callLinkInfo->callee.set(*globalData, callLinkInfo->hotPathBegin, callerCodeBlock->ownerExecutable(), callee);
    callLinkInfo->lastSeenCallee.set(*globalData, callerCodeBlock->ownerExecutable(), callee);
    repatchBuffer.relink(callLinkInfo->hotPathOther, code);

    if (calleeCodeBlock)
        calleeCodeBlock->linkIncomingCall(callLinkInfo);

    // Patch the slow path so we do not continue to try to link.
    if (kind == CodeForCall) {
        repatchBuffer.relink(CodeLocationNearCall(callLinkInfo->callReturnLocation), globalData->jitStubs->ctiVirtualCall());
        return;
    }

    ASSERT(kind == CodeForConstruct);
    repatchBuffer.relink(CodeLocationNearCall(callLinkInfo->callReturnLocation), globalData->jitStubs->ctiVirtualConstruct());
}

} // namespace JSC

#endif // ENABLE(JIT)