/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef JITInlineMethods_h
#define JITInlineMethods_h

#if ENABLE(JIT)

namespace JSC {

/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    peek(dst, argumentStackOffset);
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
}

ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
{
    ASSERT(m_codeBlock->isConstantRegisterIndex(src));
    return m_codeBlock->getConstant(src);
}

ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(from, payloadFor(entry, callFrameRegister));
}

ALWAYS_INLINE void JIT::emitPutCellToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
#if USE(JSVALUE32_64)
    store32(TrustedImm32(JSValue::CellTag), tagFor(entry, callFrameRegister));
#endif
    storePtr(from, payloadFor(entry, callFrameRegister));
}

ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
    store32(TrustedImm32(Int32Tag), intTagFor(entry, callFrameRegister));
    store32(from, intPayloadFor(entry, callFrameRegister));
}

ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(TrustedImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
}

ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    loadPtr(Address(from, entry * sizeof(Register)), to);
#if USE(JSVALUE64)
    killLastResultRegister();
#endif
}

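// Loads the single character of a one-character JSString into dst. The jumps
// appended to |failures| are taken when src is not such a string: wrong class,
// length != 1, or an unresolved rope (whose value pointer is null). Note that
// the StringImpl flags word is loaded into regT1, so this helper clobbers regT1.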
ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures)
{
    failures.append(branchPtr(NotEqual, Address(src, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info)));
    failures.append(branch32(NotEqual, MacroAssembler::Address(src, ThunkHelpers::jsStringLengthOffset()), TrustedImm32(1)));
    loadPtr(MacroAssembler::Address(src, ThunkHelpers::jsStringValueOffset()), dst);
    failures.append(branchTest32(Zero, dst));
    loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplFlagsOffset()), regT1);
    loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplDataOffset()), dst);

    JumpList is16Bit;
    JumpList cont8Bit;
    is16Bit.append(branchTest32(Zero, regT1, TrustedImm32(ThunkHelpers::stringImpl8BitFlag())));
    load8(MacroAssembler::Address(dst, 0), dst);
    cont8Bit.append(jump());
    is16Bit.link(this);
    load16(MacroAssembler::Address(dst, 0), dst);
    cont8Bit.link(this);
}

ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    load32(Address(from, entry * sizeof(Register)), to);
#if USE(JSVALUE64)
    killLastResultRegister();
#endif
}

ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    Call nakedCall = nearCall();
    m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress()));
    return nakedCall;
}

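// Returns true when the current bytecode offset is a jump target. The JSVALUE64
// result-register cache (see emitGetVirtualRegister below) relies on this: a
// cached "last result" must not be trusted at a merge point, since control may
// arrive from a path that left a different value in the cached register.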
ALWAYS_INLINE bool JIT::atJumpTarget()
{
    while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeOffset) {
        if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeOffset)
            return true;
        ++m_jumpTargetsPosition;
    }
    return false;
}

#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL

ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace)
{
#if CPU(ARM_TRADITIONAL)
#ifndef NDEBUG
    // Ensure the label after the sequence can also fit
    insnSpace += sizeof(ARMWord);
    constSpace += sizeof(uint64_t);
#endif

    ensureSpace(insnSpace, constSpace);

#elif CPU(SH4)
#ifndef NDEBUG
    insnSpace += sizeof(SH4Word);
    constSpace += sizeof(uint64_t);
#endif

    m_assembler.ensureSpace(insnSpace + m_assembler.maxInstructionSize + 2, constSpace + 8);
#endif

#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
#ifndef NDEBUG
    m_uninterruptedInstructionSequenceBegin = label();
    m_uninterruptedConstantSequenceBegin = sizeOfConstantPool();
#endif
#endif
}

ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace, int dst)
{
#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
    /* There are several cases in which the uninterrupted sequence is larger than
     * the maximum offset required for patching the same sequence. E.g.: if the
     * last macroassembler instruction in an uninterrupted sequence is a stub
     * call, it emits store instruction(s) which should not be included in the
     * calculation of the sequence's length. So insnSpace and constSpace should
     * be treated as upper limits rather than hard limits.
     */
#if CPU(SH4)
    if ((dst > 15) || (dst < -16)) {
        insnSpace += 8;
        constSpace += 2;
    }

    if (((dst >= -16) && (dst < 0)) || ((dst > 7) && (dst <= 15)))
        insnSpace += 8;
#endif
    ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) <= insnSpace);
    ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin <= constSpace);
#endif
}

#endif // ASSEMBLER_HAS_CONSTANT_POOL

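// The helpers below shuffle the return address of the current frame. On ARM,
// SH4, and MIPS the return address lives in a link register, so it is moved to
// or from an ordinary register; on x86 and x86-64 it lives on the stack, so
// the equivalent operations are a pop/push of the stack slot.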
#if CPU(ARM)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    move(linkRegister, reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    move(reg, linkRegister);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtr(address, linkRegister);
}

#elif CPU(SH4)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    m_assembler.stspr(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    m_assembler.ldspr(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtrLinkReg(address);
}

#elif CPU(MIPS)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    move(returnAddressRegister, reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    move(reg, returnAddressRegister);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtr(address, returnAddressRegister);
}

#else // CPU(X86) || CPU(X86_64)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    pop(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    push(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    push(address);
}

#endif

ALWAYS_INLINE void JIT::restoreArgumentReference()
{
    move(stackPointerRegister, firstArgumentRegister);
    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
}

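// Publishes the current call frame and bytecode position so that C++ stubs and
// the garbage collector can see them. The offset is biased by one, presumably
// so that a zero tag still means "no bytecode offset recorded"; on JSVALUE32_64
// a pointer into the instruction stream is stored instead of a raw offset.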
ALWAYS_INLINE void JIT::updateTopCallFrame()
{
    ASSERT(static_cast<int>(m_bytecodeOffset) >= 0);
    if (m_bytecodeOffset) {
#if USE(JSVALUE32_64)
        storePtr(TrustedImmPtr(m_codeBlock->instructions().begin() + m_bytecodeOffset + 1), intTagFor(RegisterFile::ArgumentCount));
#else
        store32(TrustedImm32(m_bytecodeOffset + 1), intTagFor(RegisterFile::ArgumentCount));
#endif
    }
    storePtr(callFrameRegister, &m_globalData->topCallFrame);
}

ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
{
#if CPU(X86)
    // Within a trampoline the return address will be on the stack at this point.
    addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
#elif CPU(ARM)
    move(stackPointerRegister, firstArgumentRegister);
#elif CPU(SH4)
    move(stackPointerRegister, firstArgumentRegister);
#endif
    // In the trampoline on x86-64, the first argument register is not overwritten.
}

ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
{
    return branchPtr(NotEqual, Address(reg, JSCell::structureOffset()), TrustedImmPtr(structure));
}

ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        linkSlowCase(iter);
}

ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    const JumpList::JumpVector& jumpVector = jumpList.jumps();
    size_t size = jumpVector.size();
    for (size_t i = 0; i < size; ++i)
        m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addSlowCase()
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    Jump emptyJump; // Doing it this way to make Windows happy.
    m_slowCases.append(SlowCaseEntry(emptyJump, m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset));
}

ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this);
}

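// In the JSType enumeration the object types are ordered last, with ObjectType
// as the first of them, so a type byte below ObjectType means the cell is not
// an object.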
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotObject(RegisterID structureReg)
{
    return branch8(Below, Address(structureReg, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotType(RegisterID baseReg, RegisterID scratchReg, JSType type)
{
    loadPtr(Address(baseReg, JSCell::structureOffset()), scratchReg);
    return branch8(NotEqual, Address(scratchReg, Structure::typeInfoTypeOffset()), TrustedImm32(type));
}

#if ENABLE(SAMPLING_FLAGS)
ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    or32(TrustedImm32(1u << (flag - 1)), AbsoluteAddress(SamplingFlags::addressOfFlags()));
}

ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    and32(TrustedImm32(~(1u << (flag - 1))), AbsoluteAddress(SamplingFlags::addressOfFlags()));
}
#endif

#if ENABLE(SAMPLING_COUNTERS)
ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, int32_t count)
{
    add64(TrustedImm32(count), AbsoluteAddress(counter.addressOfCounter()));
}
#endif

#if ENABLE(OPCODE_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    move(TrustedImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx);
    storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
}
#endif
#endif

#if ENABLE(CODEBLOCK_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    move(TrustedImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx);
    storePtr(TrustedImmPtr(codeBlock), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    storePtr(TrustedImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
}
#endif
#endif

ALWAYS_INLINE bool JIT::isOperandConstantImmediateChar(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
}

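// Inline object allocation: pop the head of the size class's free list and
// initialize the cell in place. If the free list is empty, the branch falls
// through to the slow case, which calls out to the C++ allocator (and may
// trigger a garbage collection).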
template <typename ClassType, bool destructor, typename StructureType> inline void JIT::emitAllocateBasicJSObject(StructureType structure, RegisterID result, RegisterID storagePtr)
{
    MarkedAllocator* allocator = 0;
    if (destructor)
        allocator = &m_globalData->heap.allocatorForObjectWithDestructor(sizeof(ClassType));
    else
        allocator = &m_globalData->heap.allocatorForObjectWithoutDestructor(sizeof(ClassType));
    loadPtr(&allocator->m_freeList.head, result);
    addSlowCase(branchTestPtr(Zero, result));

    // remove the object from the free list
    loadPtr(Address(result), storagePtr);
    storePtr(storagePtr, &allocator->m_freeList.head);

    // initialize the object's structure
    storePtr(structure, Address(result, JSCell::structureOffset()));

    // initialize the object's classInfo pointer
    storePtr(TrustedImmPtr(&ClassType::s_info), Address(result, JSCell::classInfoOffset()));

    // initialize the inheritor ID
    storePtr(TrustedImmPtr(0), Address(result, JSObject::offsetOfInheritorID()));

    // initialize the object's property storage pointer
    addPtr(TrustedImm32(sizeof(JSObject)), result, storagePtr);
    storePtr(storagePtr, Address(result, ClassType::offsetOfPropertyStorage()));
}

template <typename T> inline void JIT::emitAllocateJSFinalObject(T structure, RegisterID result, RegisterID scratch)
{
    emitAllocateBasicJSObject<JSFinalObject, false, T>(structure, result, scratch);
}

inline void JIT::emitAllocateJSFunction(FunctionExecutable* executable, RegisterID scopeChain, RegisterID result, RegisterID storagePtr)
{
    emitAllocateBasicJSObject<JSFunction, true>(TrustedImmPtr(m_codeBlock->globalObject()->namedFunctionStructure()), result, storagePtr);

    // store the function's scope chain
    storePtr(scopeChain, Address(result, JSFunction::offsetOfScopeChain()));

    // store the function's executable member
    storePtr(TrustedImmPtr(executable), Address(result, JSFunction::offsetOfExecutable()));

    // clear the function's inheritorID
    storePtr(TrustedImmPtr(0), Address(result, JSFunction::offsetOfCachedInheritorID()));

    // store the function's name
    ASSERT(executable->nameValue());
    int functionNameOffset = sizeof(JSValue) * m_codeBlock->globalObject()->functionNameOffset();
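    // Note: the stores below address the name slot relative to regT1, which
    // assumes the property-storage pointer left there by emitAllocateBasicJSObject
    // (i.e. that storagePtr is regT1); functionNameOffset is an offset into the
    // function's property storage.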
    storePtr(TrustedImmPtr(executable->nameValue()), Address(regT1, functionNameOffset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
#if USE(JSVALUE32_64)
    store32(TrustedImm32(JSValue::CellTag), Address(regT1, functionNameOffset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
#endif
}

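// Inline bump allocation from the copied-space block: reserve |size| bytes by
// advancing m_currentOffset, taking the slow case when the reservation would
// run past the end of the current block.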
inline void JIT::emitAllocateBasicStorage(size_t size, RegisterID result, RegisterID storagePtr)
{
    CopiedAllocator* allocator = &m_globalData->heap.storageAllocator();

    // FIXME: We need to check for wrap-around.
    // Check to make sure that the allocation will fit in the current block.
    loadPtr(&allocator->m_currentOffset, result);
    addPtr(TrustedImm32(size), result);
    loadPtr(&allocator->m_currentBlock, storagePtr);
    addPtr(TrustedImm32(HeapBlock::s_blockSize), storagePtr);
    addSlowCase(branchPtr(AboveOrEqual, result, storagePtr));

    // Load the original offset.
    loadPtr(&allocator->m_currentOffset, result);

    // Bump the pointer forward.
    move(result, storagePtr);
    addPtr(TrustedImm32(size), storagePtr);
    storePtr(storagePtr, &allocator->m_currentOffset);
}

inline void JIT::emitAllocateJSArray(unsigned valuesRegister, unsigned length, RegisterID cellResult, RegisterID storageResult, RegisterID storagePtr)
{
    unsigned initialLength = std::max(length, 4U);
    size_t initialStorage = JSArray::storageSize(initialLength);

    // We allocate the backing store first to ensure that garbage collection
    // doesn't happen during JSArray initialization.
    emitAllocateBasicStorage(initialStorage, storageResult, storagePtr);

    // Allocate the cell for the array.
    emitAllocateBasicJSObject<JSArray, false>(TrustedImmPtr(m_codeBlock->globalObject()->arrayStructure()), cellResult, storagePtr);

    // Store all the necessary info in the ArrayStorage.
    storePtr(storageResult, Address(storageResult, ArrayStorage::allocBaseOffset()));
    store32(Imm32(length), Address(storageResult, ArrayStorage::lengthOffset()));
    store32(Imm32(length), Address(storageResult, ArrayStorage::numValuesInVectorOffset()));

    // Store the newly allocated ArrayStorage.
    storePtr(storageResult, Address(cellResult, JSArray::storageOffset()));

    // Store the vector length and index bias.
    store32(Imm32(initialLength), Address(cellResult, JSArray::vectorLengthOffset()));
    store32(TrustedImm32(0), Address(cellResult, JSArray::indexBiasOffset()));

    // Initialize the sparse value map.
    storePtr(TrustedImmPtr(0), Address(cellResult, JSArray::sparseValueMapOffset()));

    // Store the values we have.
    for (unsigned i = 0; i < length; i++) {
#if USE(JSVALUE64)
        loadPtr(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register)), storagePtr);
        storePtr(storagePtr, Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i));
#else
        load32(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register)), storagePtr);
        store32(storagePtr, Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i));
        load32(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register) + sizeof(uint32_t)), storagePtr);
        store32(storagePtr, Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i + sizeof(uint32_t)));
#endif
    }

    // Zero out the remaining slots.
    for (unsigned i = length; i < initialLength; i++) {
#if USE(JSVALUE64)
        storePtr(TrustedImmPtr(0), Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i));
#else
        store32(TrustedImm32(static_cast<int>(JSValue::EmptyValueTag)), Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
        store32(TrustedImm32(0), Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
#endif
    }
}

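// Value profiling writes the value produced by a bytecode into a profile
// bucket. With more than one bucket, the bucket index is advanced by a
// randomly chosen odd stride (1 or 3) and then masked; because the stride is
// odd and bucketIndexMask implies a power-of-two bucket count, the counter
// keeps cycling through all buckets rather than settling on one.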
#if ENABLE(VALUE_PROFILER)
inline void JIT::emitValueProfilingSite(ValueProfile* valueProfile)
{
    ASSERT(shouldEmitProfiling());
    ASSERT(valueProfile);

    const RegisterID value = regT0;
#if USE(JSVALUE32_64)
    const RegisterID valueTag = regT1;
#endif
    const RegisterID scratch = regT3;

    if (ValueProfile::numberOfBuckets == 1) {
        // We're in a simple configuration: only one bucket, so we can just do a direct
        // store.
#if USE(JSVALUE64)
        storePtr(value, valueProfile->m_buckets);
#else
        EncodedValueDescriptor* descriptor = bitwise_cast<EncodedValueDescriptor*>(valueProfile->m_buckets);
        store32(value, &descriptor->asBits.payload);
        store32(valueTag, &descriptor->asBits.tag);
#endif
        return;
    }

    if (m_randomGenerator.getUint32() & 1)
        add32(TrustedImm32(1), bucketCounterRegister);
    else
        add32(TrustedImm32(3), bucketCounterRegister);
    and32(TrustedImm32(ValueProfile::bucketIndexMask), bucketCounterRegister);
    move(TrustedImmPtr(valueProfile->m_buckets), scratch);
#if USE(JSVALUE64)
    storePtr(value, BaseIndex(scratch, bucketCounterRegister, TimesEight));
#elif USE(JSVALUE32_64)
    store32(value, BaseIndex(scratch, bucketCounterRegister, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
    store32(valueTag, BaseIndex(scratch, bucketCounterRegister, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
#endif
}

inline void JIT::emitValueProfilingSite(unsigned bytecodeOffset)
{
    if (!shouldEmitProfiling())
        return;
    emitValueProfilingSite(m_codeBlock->valueProfileForBytecodeOffset(bytecodeOffset));
}

inline void JIT::emitValueProfilingSite()
{
    emitValueProfilingSite(m_bytecodeOffset);
}
#endif // ENABLE(VALUE_PROFILER)

#if USE(JSVALUE32_64)

inline void JIT::emitLoadTag(int index, RegisterID tag)
{
    RegisterID mappedTag;
    if (getMappedTag(index, mappedTag)) {
        move(mappedTag, tag);
        unmap(tag);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).tag()), tag);
        unmap(tag);
        return;
    }

    load32(tagFor(index), tag);
    unmap(tag);
}

inline void JIT::emitLoadPayload(int index, RegisterID payload)
{
    RegisterID mappedPayload;
    if (getMappedPayload(index, mappedPayload)) {
        move(mappedPayload, payload);
        unmap(payload);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).payload()), payload);
        unmap(payload);
        return;
    }

    load32(payloadFor(index), payload);
    unmap(payload);
}

inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
{
    move(Imm32(v.payload()), payload);
    move(Imm32(v.tag()), tag);
}

inline void JIT::emitLoad(int index, RegisterID tag, RegisterID payload, RegisterID base)
{
    ASSERT(tag != payload);

    if (base == callFrameRegister) {
        ASSERT(payload != base);
        emitLoadPayload(index, payload);
        emitLoadTag(index, tag);
        return;
    }

    if (payload == base) { // avoid stomping base
        load32(tagFor(index, base), tag);
        load32(payloadFor(index, base), payload);
        return;
    }

    load32(payloadFor(index, base), payload);
    load32(tagFor(index, base), tag);
}

inline void JIT::emitLoad2(int index1, RegisterID tag1, RegisterID payload1, int index2, RegisterID tag2, RegisterID payload2)
{
    if (isMapped(index1)) {
        emitLoad(index1, tag1, payload1);
        emitLoad(index2, tag2, payload2);
        return;
    }
    emitLoad(index2, tag2, payload2);
    emitLoad(index1, tag1, payload1);
}

inline void JIT::emitLoadDouble(int index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(&inConstantPool, value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        char* bytePointer = reinterpret_cast<char*>(&inConstantPool);
        convertInt32ToDouble(AbsoluteAddress(bytePointer + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
    } else
        convertInt32ToDouble(payloadFor(index), value);
}

inline void JIT::emitStore(int index, RegisterID tag, RegisterID payload, RegisterID base)
{
    store32(payload, payloadFor(index, base));
    store32(tag, tagFor(index, base));
}

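// The emitStore variants below take an indexIsInt32/indexIsCell/indexIsBool
// hint: when the caller can prove the stack slot already holds a value with
// the right tag, rewriting the payload alone saves the tag store.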
inline void JIT::emitStoreInt32(int index, RegisterID payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreAndMapInt32(int index, RegisterID tag, RegisterID payload, bool indexIsInt32, size_t opcodeLength)
{
    emitStoreInt32(index, payload, indexIsInt32);
    map(m_bytecodeOffset + opcodeLength, index, tag, payload);
}

inline void JIT::emitStoreInt32(int index, TrustedImm32 payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreCell(int index, RegisterID payload, bool indexIsCell)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsCell)
        store32(TrustedImm32(JSValue::CellTag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreBool(int index, RegisterID payload, bool indexIsBool)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsBool)
        store32(TrustedImm32(JSValue::BooleanTag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreDouble(int index, FPRegisterID value)
{
    storeDouble(value, addressFor(index));
}

inline void JIT::emitStore(int index, const JSValue constant, RegisterID base)
{
    store32(Imm32(constant.payload()), payloadFor(index, base));
    store32(Imm32(constant.tag()), tagFor(index, base));
}

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    emitStore(dst, jsUndefined());
}

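// The "map" below is a one-entry cache: it remembers that, at a given bytecode
// offset, the tag and payload of one virtual register are already live in two
// machine registers, so a following load of the same operand can be elided.
// The cache must not survive a jump target (isLabeled), since another incoming
// path may leave different values in those registers.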
inline bool JIT::isLabeled(unsigned bytecodeOffset)
{
    for (size_t numberOfJumpTargets = m_codeBlock->numberOfJumpTargets(); m_jumpTargetIndex != numberOfJumpTargets; ++m_jumpTargetIndex) {
        unsigned jumpTarget = m_codeBlock->jumpTarget(m_jumpTargetIndex);
        if (jumpTarget == bytecodeOffset)
            return true;
        if (jumpTarget > bytecodeOffset)
            return false;
    }
    return false;
}

inline void JIT::map(unsigned bytecodeOffset, int virtualRegisterIndex, RegisterID tag, RegisterID payload)
{
    if (isLabeled(bytecodeOffset))
        return;

    m_mappedBytecodeOffset = bytecodeOffset;
    m_mappedVirtualRegisterIndex = virtualRegisterIndex;
    m_mappedTag = tag;
    m_mappedPayload = payload;

    ASSERT(!canBeOptimizedOrInlined() || m_mappedPayload == regT0);
    ASSERT(!canBeOptimizedOrInlined() || m_mappedTag == regT1);
}

inline void JIT::unmap(RegisterID registerID)
{
    if (m_mappedTag == registerID)
        m_mappedTag = (RegisterID)-1;
    else if (m_mappedPayload == registerID)
        m_mappedPayload = (RegisterID)-1;
}

inline void JIT::unmap()
{
    m_mappedBytecodeOffset = (unsigned)-1;
    m_mappedVirtualRegisterIndex = RegisterFile::ReturnPC;
    m_mappedTag = (RegisterID)-1;
    m_mappedPayload = (RegisterID)-1;
}

inline bool JIT::isMapped(int virtualRegisterIndex)
{
    if (m_mappedBytecodeOffset != m_bytecodeOffset)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    return true;
}

inline bool JIT::getMappedPayload(int virtualRegisterIndex, RegisterID& payload)
{
    if (m_mappedBytecodeOffset != m_bytecodeOffset)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedPayload == (RegisterID)-1)
        return false;
    payload = m_mappedPayload;
    return true;
}

inline bool JIT::getMappedTag(int virtualRegisterIndex, RegisterID& tag)
{
    if (m_mappedBytecodeOffset != m_bytecodeOffset)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedTag == (RegisterID)-1)
        return false;
    tag = m_mappedTag;
    return true;
}

inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
        if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
            addSlowCase(jump());
        else
            addSlowCase(emitJumpIfNotJSCell(virtualRegisterIndex));
    }
}

inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex, RegisterID tag)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
        if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
            addSlowCase(jump());
        else
            addSlowCase(branch32(NotEqual, tag, TrustedImm32(JSValue::CellTag)));
    }
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant)
{
    if (isOperandConstantImmediateInt(op1)) {
        constant = getConstantOperand(op1).asInt32();
        op = op2;
        return true;
    }

    if (isOperandConstantImmediateInt(op2)) {
        constant = getConstantOperand(op2).asInt32();
        op = op1;
        return true;
    }

    return false;
}

#else // USE(JSVALUE32_64)

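// On JSVALUE64 the JIT keeps a one-register result cache: cachedResultRegister
// holds the value most recently written by a bytecode, and
// m_lastResultBytecodeRegister records which virtual register it corresponds
// to. killLastResultRegister() invalidates the cache; the cache must be dead
// at any point where control flow can merge.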
ALWAYS_INLINE void JIT::killLastResultRegister()
{
    m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
}

// get arg puts an arg from the SF register array into a h/w register
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue value = m_codeBlock->getConstant(src);
        if (!value.isNumber())
            move(TrustedImmPtr(JSValue::encode(value)), dst);
        else
            move(ImmPtr(JSValue::encode(value)), dst);
        killLastResultRegister();
        return;
    }

    if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src) && !atJumpTarget()) {
        // The argument we want is already stored in eax
        if (dst != cachedResultRegister)
            move(cachedResultRegister, dst);
        killLastResultRegister();
        return;
    }

    loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
    killLastResultRegister();
}

ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
{
    if (src2 == m_lastResultBytecodeRegister) {
        emitGetVirtualRegister(src2, dst2);
        emitGetVirtualRegister(src1, dst1);
    } else {
        emitGetVirtualRegister(src1, dst1);
        emitGetVirtualRegister(src2, dst2);
    }
}

ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src)
{
    return getConstantOperand(src).asInt32();
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
{
    storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
    m_lastResultBytecodeRegister = (from == cachedResultRegister) ? static_cast<int>(dst) : std::numeric_limits<int>::max();
}

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    storePtr(TrustedImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
}

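// In the JSVALUE64 encoding, cell pointers have none of the tag bits set, so
// testing a value against tagMaskRegister (or the TagMask constant on 32-bit)
// distinguishes cells from immediates: a zero result means the value is a cell.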
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchTestPtr(Zero, reg, tagMaskRegister);
#else
    return branchTest32(Zero, reg, TrustedImm32(TagMask));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfBothJSCells(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    orPtr(reg2, scratch);
    return emitJumpIfJSCell(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfJSCell(reg));
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchTestPtr(NonZero, reg, tagMaskRegister);
#else
    return branchTest32(NonZero, reg, TrustedImm32(TagMask));
#endif
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfNotJSCell(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        emitJumpSlowCaseIfNotJSCell(reg);
}

#if USE(JSVALUE64)

inline void JIT::emitLoadDouble(int index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(&inConstantPool, value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        ASSERT(isOperandConstantImmediateInt(index));
        convertInt32ToDouble(Imm32(getConstantOperand(index).asInt32()), value);
    } else
        convertInt32ToDouble(addressFor(index), value);
}
#endif

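// Immediate int32 values carry the full TagTypeNumber pattern in their upper
// bits, while boxed doubles carry only part of it, so on JSVALUE64 a value
// compares AboveOrEqual to tagTypeNumberRegister exactly when it is an
// immediate integer.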
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchPtr(AboveOrEqual, reg, tagTypeNumberRegister);
#else
    return branchTest32(NonZero, reg, TrustedImm32(TagTypeNumber));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchPtr(Below, reg, tagTypeNumberRegister);
#else
    return branchTest32(Zero, reg, TrustedImm32(TagTypeNumber));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    andPtr(reg2, scratch);
    return emitJumpIfNotImmediateInteger(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateInteger(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateNumber(reg));
}

#if USE(JSVALUE32_64)
ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
{
    subPtr(TrustedImm32(TagTypeNumber), reg);
}

ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg)
{
    return branchSubPtr(Zero, TrustedImm32(TagTypeNumber), reg);
}
#endif

ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
{
#if USE(JSVALUE64)
    emitFastArithIntToImmNoCheck(src, dest);
#else
    if (src != dest)
        move(src, dest);
    addPtr(TrustedImm32(TagTypeNumber), dest);
#endif
}

// operand is int32_t, must have been zero-extended if register is 64-bit.
ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
{
#if USE(JSVALUE64)
    if (src != dest)
        move(src, dest);
    orPtr(tagTypeNumberRegister, dest);
#else
    signExtend32ToPtr(src, dest);
    addPtr(dest, dest);
    emitFastArithReTagImmediate(dest, dest);
#endif
}

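// Boxing an int32 on JSVALUE64 is a single OR with tagTypeNumberRegister,
// which sets the TagTypeNumber bits above the zero-extended 32-bit payload.
// emitTagAsBoolImmediate relies on the analogous trick for booleans: ValueFalse
// and ValueTrue differ only in their low bit, so OR-ing ValueFalse onto a 0/1
// payload yields the encoded false/true value.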
ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
{
    or32(TrustedImm32(static_cast<int32_t>(ValueFalse)), reg);
}

#endif // USE(JSVALUE32_64)

} // namespace JSC

#endif // ENABLE(JIT)

#endif // JITInlineMethods_h