|| rmode_ == EXTERNAL_REFERENCE);
if (FLAG_enable_embedded_constant_pool ||
Assembler::IsMovW(Memory::int32_at(pc_))) {
- // We return the PC for ool constant pool since this function is used by the
- // serializer and expects the address to reside within the code object.
+ // We return the PC for embedded constant pool since this function is used
+ // by the serializer and expects the address to reside within the code
+ // object.
return reinterpret_cast<Address>(pc_);
} else {
DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
void Assembler::Align(int m) {
DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
+ DCHECK((pc_offset() & (kInstrSize - 1)) == 0);
while ((pc_offset() & (m - 1)) != 0) {
nop();
}
// possible to align the pc offset to a multiple
// of m. m must be a power of 2 (>= 4).
void Align(int m);
+ // Insert the smallest number of zero bytes possible to align the pc offset
+ // to a multiple of m. m must be a power of 2 (>= 2).
+ void DataAlign(int m);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
__ ldr(r1, FieldMemOperand(r0, Code::kDeoptimizationDataOffset));
{ ConstantPoolUnavailableScope constant_pool_unavailable(masm);
+ __ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
+
if (FLAG_enable_embedded_constant_pool) {
__ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r0);
}
__ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(
DeoptimizationInputData::kOsrPcOffsetIndex)));
- // Compute the target address = code_obj + header_size + osr_offset
- // <entry_addr> = <code_obj> + #header_size + <osr_offset>
- __ add(r0, r0, Operand::SmiUntag(r1));
- __ add(lr, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Compute the target address = code start + osr_offset
+ __ add(lr, r0, Operand::SmiUntag(r1));
// And "return" to the OSR entry point of the function.
__ Ret();
__ ldr(r1, MemOperand(r1));
__ mov(r2, Operand(pending_handler_offset_address));
__ ldr(r2, MemOperand(r2));
+ __ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
if (FLAG_enable_embedded_constant_pool) {
__ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r1);
}
- __ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
__ add(pc, r1, r2);
}
__ ldr(map_load_offset, MemOperand(map_load_offset));
__ str(map, FieldMemOperand(map_load_offset, Cell::kValueOffset));
- __ mov(r8, map);
+ __ mov(scratch, map);
// |map_load_offset| points at the beginning of the cell. Calculate the
// field containing the map.
__ add(function, map_load_offset, Operand(Cell::kValueOffset - 1));
- __ RecordWriteField(map_load_offset, Cell::kValueOffset, r8, function,
+ __ RecordWriteField(map_load_offset, Cell::kValueOffset, scratch, function,
kLRHasNotBeenSaved, kDontSaveFPRegs,
OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
}
// this position in a symbol (see static asserts in type-feedback-vector.h).
Label check_allocation_site;
Register feedback_map = r5;
- Register weak_value = r8;
+ Register weak_value = r6;
__ ldr(weak_value, FieldMemOperand(r4, WeakCell::kValueOffset));
__ cmp(r1, weak_value);
__ b(eq, &done);
Register slot = LoadWithVectorDescriptor::SlotRegister(); // r0
Register feedback = r4;
Register receiver_map = r5;
- Register scratch1 = r8;
+ Register scratch1 = r6;
__ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
__ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
Register slot = LoadWithVectorDescriptor::SlotRegister(); // r0
Register feedback = r4;
Register receiver_map = r5;
- Register scratch1 = r8;
+ Register scratch1 = r6;
__ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
__ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
const int kNumSafepointRegisters = 16;
+// The embedded constant pool pointer (r8/pp) is not included in the safepoint
+// since it is not tagged. This register is preserved in the stack frame where
+// its value will be updated if GC code movement occurs. Including it in the
+// safepoint (where it will not be relocated) would cause a stale value to be
+// restored.
+const RegList kConstantPointerRegMask =
+ FLAG_enable_embedded_constant_pool ? (1 << 8) : 0;
+const int kNumConstantPoolPointerReg =
+ FLAG_enable_embedded_constant_pool ? 1 : 0;
+
// Define the list of registers actually saved at safepoints.
// Note that the number of saved registers may be smaller than the reserved
// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
+const RegList kSafepointSavedRegisters =
+ kJSCallerSaved | (kCalleeSaved & ~kConstantPointerRegMask);
+const int kNumSafepointSavedRegisters =
+ kNumJSCallerSaved + kNumCalleeSaved - kNumConstantPoolPointerReg;
// ----------------------------------------------------
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
- // Safepoints expect a block of contiguous register values starting with r0:
- DCHECK(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
+ // Safepoints expect a block of contiguous register values starting with r0.
+ // except when FLAG_enable_embedded_constant_pool, which omits pp.
+ DCHECK(kSafepointSavedRegisters ==
+ (FLAG_enable_embedded_constant_pool
+ ? ((1 << (kNumSafepointSavedRegisters + 1)) - 1) & ~pp.bit()
+ : (1 << kNumSafepointSavedRegisters) - 1));
// Safepoints expect a block of kNumSafepointRegisters values on the
// stack, so adjust the stack for unsaved registers.
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// The registers are pushed starting with the highest encoding,
// which means that lowest encodings are closest to the stack pointer.
+ if (FLAG_enable_embedded_constant_pool && reg_code > pp.code()) {
+ // RegList omits pp.
+ reg_code -= 1;
+ }
DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
return reg_code;
}
// possible to align the pc offset to a multiple
// of m. m must be a power of 2 (>= 4).
void Align(int m);
+ // Insert the smallest number of zero bytes possible to align the pc offset
+ // to a multiple of m. m must be a power of 2 (>= 2).
+ void DataAlign(int m);
inline void Unreachable();
if (!emitted) {
// Mark start of constant pool. Align if necessary.
- if (!empty) assm->Align(kDoubleSize);
+ if (!empty) assm->DataAlign(kDoubleSize);
assm->bind(&emitted_label_);
if (!empty) {
// Emit in groups based on access and type.
EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::DOUBLE);
EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::INTPTR);
if (info_[ConstantPoolEntry::DOUBLE].overflow()) {
- assm->Align(kDoubleSize);
+ assm->DataAlign(kDoubleSize);
EmitGroup(assm, ConstantPoolEntry::OVERFLOWED,
ConstantPoolEntry::DOUBLE);
}
EnsureSpace ensure_space(this);
RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
}
+
+
+// Pads the buffer with zero bytes (via db(0)) until pc_offset() is a
+// multiple of m. m must be a power of 2, >= 2. Unlike Align(), this emits
+// raw data bytes rather than nop instructions, so it is suitable for
+// aligning constant-pool data that need not be instruction aligned.
+void Assembler::DataAlign(int m) {
+  DCHECK(m >= 2 && base::bits::IsPowerOfTwo32(m));
+  while ((pc_offset() & (m - 1)) != 0) {
+    db(0);
+  }
+}
} // namespace internal
} // namespace v8
// possible to align the pc offset to a multiple
// of m. m must be a power of 2.
void Align(int m);
+ // Insert the smallest number of zero bytes possible to align the pc offset
+ // to a multiple of m. m must be a power of 2 (>= 2).
+ void DataAlign(int m);
void Nop(int bytes = 1);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
// possible to align the pc offset to a multiple
// of m. m must be a power of 2 (>= 4).
void Align(int m);
+ // Insert the smallest number of zero bytes possible to align the pc offset
+ // to a multiple of m. m must be a power of 2 (>= 2).
+ void DataAlign(int m);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
// possible to align the pc offset to a multiple
// of m. m must be a power of 2 (>= 4).
void Align(int m);
+ // Insert the smallest number of zero bytes possible to align the pc offset
+ // to a multiple of m. m must be a power of 2 (>= 2).
+ void DataAlign(int m);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
if (FLAG_enable_embedded_constant_pool &&
Assembler::IsConstantPoolLoadStart(pc_)) {
- // We return the PC for ool constant pool since this function is used by the
- // serializer and expects the address to reside within the code object.
+ // We return the PC for embedded constant pool since this function is used
+ // by the serializer and expects the address to reside within the code
+ // object.
return reinterpret_cast<Address>(pc_);
}
void Assembler::Align(int m) {
-#if V8_TARGET_ARCH_PPC64
-  DCHECK(m >= 4 && base::bits::IsPowerOfTwo64(m));
-#else
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
-#endif
-  // First ensure instruction alignment
-  while (pc_offset() & (kInstrSize - 1)) {
-    db(0);
-  }
-  // Then pad to requested alignedment with nops
-  while (pc_offset() & (m - 1)) {
+  // Align() now requires the pc to already be instruction aligned and pads
+  // only with nops; byte-granular zero padding for data has moved to the
+  // new DataAlign().
+  DCHECK((pc_offset() & (kInstrSize - 1)) == 0);
+  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}
// possible to align the pc offset to a multiple
// of m. m must be a power of 2 (>= 4).
void Align(int m);
+ // Insert the smallest number of zero bytes possible to align the pc offset
+ // to a multiple of m. m must be a power of 2 (>= 2).
+ void DataAlign(int m);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
ALL_SPACES(kBackref, kPlain, kStartOfObject)
ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject)
#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
- defined(V8_TARGET_ARCH_PPC)
+ defined(V8_TARGET_ARCH_PPC) || V8_EMBEDDED_CONSTANT_POOL
// Deserialize a new object from pointer found in code and write
// a pointer to it to the current object. Required only for MIPS, PPC or
- // ARM with ool constant pool, and omitted on the other architectures
+ // ARM with embedded constant pool, and omitted on the other architectures
// because it is fully unrolled and would cause bloat.
ALL_SPACES(kNewObject, kFromCode, kStartOfObject)
// Find a recently deserialized code object using its offset from the
// current allocation point and write a pointer to it to the current
- // object. Required only for MIPS, PPC or ARM with ool constant pool.
+ // object. Required only for MIPS, PPC or ARM with embedded constant pool.
ALL_SPACES(kBackref, kFromCode, kStartOfObject)
ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject)
#endif
CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0)
CASE_BODY(kRootArray, kPlain, kStartOfObject, 0)
#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
- defined(V8_TARGET_ARCH_PPC)
+ defined(V8_TARGET_ARCH_PPC) || V8_EMBEDDED_CONSTANT_POOL
// Find an object in the roots array and write a pointer to it to in code.
CASE_STATEMENT(kRootArray, kFromCode, kStartOfObject, 0)
CASE_BODY(kRootArray, kFromCode, kStartOfObject, 0)
// possible to align the pc offset to a multiple
// of m, where m must be a power of 2.
void Align(int m);
+ // Insert the smallest number of zero bytes possible to align the pc offset
+ // to a multiple of m. m must be a power of 2 (>= 2).
+ void DataAlign(int m);
void Nop(int bytes = 1);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
// possible to align the pc offset to a multiple
// of m. m must be a power of 2.
void Align(int m);
+ // Insert the smallest number of zero bytes possible to align the pc offset
+ // to a multiple of m. m must be a power of 2 (>= 2).
+ void DataAlign(int m);
void Nop(int bytes = 1);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();