AssembleArchJump(i.InputRpo(0));
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
+ case kArchLookupSwitch:
+ AssembleArchLookupSwitch(instr);
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kArchTableSwitch:
+ AssembleArchTableSwitch(instr);
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
case kArchNop:
// don't emit code for nops.
DCHECK_EQ(LeaveRC, i.OutputRCBit());
}
+void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
+ PPCOperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
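+ // Instruction inputs are [value, default target, case value, case target,
+ // ...]; emit a compare-and-branch per case, falling back to the default.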
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ __ Cmpi(input, Operand(i.InputInt32(static_cast<int>(index + 0))), r0);
+ __ beq(GetLabel(i.InputRpo(static_cast<int>(index + 1))));
+ }
+ AssembleArchJump(i.InputRpo(1));
+}
+
+
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+ PPCOperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
+ Label** cases = zone()->NewArray<Label*>(case_count);
+ for (int32_t index = 0; index < case_count; ++index) {
+ cases[index] = GetLabel(i.InputRpo(index + 2));
+ }
+ Label* const table = AddJumpTable(cases, case_count);
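+ // Unsigned compare: any index outside [0, case_count), including negative
+ // values, branches to the default block.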
+ __ Cmpli(input, Operand(case_count), r0);
+ __ bge(GetLabel(i.InputRpo(1)));
+ __ mov_label_addr(kScratchReg, table);
+ __ ShiftLeftImm(r0, input, Operand(kPointerSizeLog2));
+ __ LoadPX(kScratchReg, MemOperand(kScratchReg, r0));
+ __ Jump(kScratchReg);
+}
+
+
void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, Deoptimizer::LAZY);
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
-#if ABI_USES_FUNCTION_DESCRIPTORS
__ function_descriptor();
-#endif
int register_save_area_size = 0;
RegList frame_saves = fp.bit();
__ mflr(r0);
__ Prologue(info->IsCodePreAgingActive());
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
- } else {
+ } else if (stack_slots > 0) {
__ StubPrologue();
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
}
- int stack_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
void CodeGenerator::AssembleReturn() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
if (frame()->GetRegisterSaveAreaSize() > 0) {
// Remove this frame's spill slots first.
- int stack_slots = frame()->GetSpillSlotCount();
if (stack_slots > 0) {
__ Add(sp, sp, stack_slots * kPointerSize, r0);
}
}
__ LeaveFrame(StackFrame::MANUAL);
__ Ret();
- } else {
+ } else if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
int pop_count = descriptor->IsJSFunctionCall()
? static_cast<int>(descriptor->JSParameterCount())
: 0;
__ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
__ Ret();
+ } else {
+ __ Ret();
}
}
}
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
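+ // Each entry is the absolute address of a case target; the table switch
+ // sequence loads an entry and jumps through it.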
+ for (size_t index = 0; index < target_count; ++index) {
+ __ emit_label_addr(targets[index]);
+ }
+}
+
+
void CodeGenerator::AddNopForSmiCodeInlining() {
// We do not insert nops for inlined Smi code.
}
}
+void InstructionSelector::VisitSwitch(Node* node, BasicBlock* default_branch,
+ BasicBlock** case_branches,
+ int32_t* case_values, size_t case_count,
+ int32_t min_value, int32_t max_value) {
+ PPCOperandGenerator g(this);
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+ InstructionOperand default_operand = g.Label(default_branch);
+
+ // Note that {value_range} can be 0 if {min_value} is -2^31 and {max_value}
+ // is 2^31-1, so don't assume that it's non-zero below.
+ size_t value_range =
+ 1u + bit_cast<uint32_t>(max_value) - bit_cast<uint32_t>(min_value);
+
+ // Determine whether to issue an ArchTableSwitch or an ArchLookupSwitch
+ // instruction.
+ size_t table_space_cost = 4 + value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 3 + 2 * case_count;
+ size_t lookup_time_cost = case_count;
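+ // Choose the table switch when its space cost plus three times its time
+ // cost does not exceed that of the lookup switch.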
+ if (case_count > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ min_value > std::numeric_limits<int32_t>::min()) {
+ InstructionOperand index_operand = value_operand;
+ if (min_value) {
+ index_operand = g.TempRegister();
+ Emit(kPPC_Sub32, index_operand, value_operand,
+ g.TempImmediate(min_value));
+ }
+ size_t input_count = 2 + value_range;
+ auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
+ inputs[0] = index_operand;
+ std::fill(&inputs[1], &inputs[input_count], default_operand);
+ for (size_t index = 0; index < case_count; ++index) {
+ size_t value = case_values[index] - min_value;
+ BasicBlock* branch = case_branches[index];
+ DCHECK_LE(0u, value);
+ DCHECK_LT(value + 2, input_count);
+ inputs[value + 2] = g.Label(branch);
+ }
+ Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
+ ->MarkAsControl();
+ return;
+ }
+
+ // Generate a sequence of conditional jumps.
+ size_t input_count = 2 + case_count * 2;
+ auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
+ inputs[0] = value_operand;
+ inputs[1] = default_operand;
+ for (size_t index = 0; index < case_count; ++index) {
+ int32_t value = case_values[index];
+ BasicBlock* branch = case_branches[index];
+ inputs[index * 2 + 2 + 0] = g.TempImmediate(value);
+ inputs[index * 2 + 2 + 1] = g.Label(branch);
+ }
+ Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
+ ->MarkAsControl();
+}
+
+
void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont(kEqual, node);
Int32BinopMatcher m(node);
} else {
// No relocation information when printing code stubs.
}
-#if !V8_TARGET_ARCH_PPC
int constants = -1; // no constants being decoded at the start
-#endif
while (pc < end) {
// First decode instruction so that we know its length.
byte* prev_pc = pc;
-#if !V8_TARGET_ARCH_PPC
if (constants > 0) {
SNPrintF(decode_buffer,
"%08x constant",
pc += d.InstructionDecode(decode_buffer, pc);
}
}
-#else // !V8_TARGET_ARCH_PPC
-#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
- // Function descriptors are specially decoded and skipped.
- // Other internal references (load of ool constant pool pointer)
- // are not since they are a encoded as a regular mov sequence.
- int skip;
- if (it != NULL && !it->done() && it->rinfo()->pc() == pc &&
- it->rinfo()->rmode() == RelocInfo::INTERNAL_REFERENCE &&
- (skip = Assembler::DecodeInternalReference(decode_buffer, pc))) {
- pc += skip;
- } else {
- decode_buffer[0] = '\0';
- pc += d.InstructionDecode(decode_buffer, pc);
- }
-#else
- decode_buffer[0] = '\0';
- pc += d.InstructionDecode(decode_buffer, pc);
-#endif // ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
-#endif // !V8_TARGET_ARCH_PPC
// Collect RelocInfo for this instruction (prev_pc .. pc-1)
List<const char*> comments(4);
void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Register holder, int accessor_index, int expected_arguments,
- Register scratch) {
+ MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
+ int accessor_index, int expected_arguments, Register scratch) {
// ----------- S t a t e -------------
// -- r3 : receiver
// -- r5 : name
DCHECK(!holder.is(scratch));
DCHECK(!receiver.is(scratch));
// Call the JavaScript getter with the receiver on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ if (map->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
__ LoadP(scratch,
FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Register holder, int accessor_index, int expected_arguments,
- Register scratch) {
+ MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
+ int accessor_index, int expected_arguments, Register scratch) {
// ----------- S t a t e -------------
// -- lr : return address
// -----------------------------------
DCHECK(!receiver.is(scratch));
DCHECK(!value().is(scratch));
// Call the JavaScript setter with receiver and value on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ if (map->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
__ LoadP(scratch,
FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
Register object_reg, Register holder_reg, Register scratch1,
Register scratch2, Handle<Name> name, Label* miss,
PrototypeCheckType check) {
- Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));
+ Handle<Map> receiver_map = map();
// Make sure there's no overlap between holder and object registers.
DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
int depth = 0;
Handle<JSObject> current = Handle<JSObject>::null();
- if (type()->IsConstant()) {
- current = Handle<JSObject>::cast(type()->AsConstant()->Value());
+ if (receiver_map->IsJSGlobalObjectMap()) {
+ current = isolate()->global_object();
}
Handle<JSObject> prototype = Handle<JSObject>::null();
Handle<Map> current_map = receiver_map;
#define __ ACCESS_MASM(masm())
-Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
CodeHandleList* handlers,
Handle<Name> name,
Code::StubType type,
}
Label number_case;
- Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss;
__ JumpIfSmi(receiver(), smi_target);
// Polymorphic keyed stores may use the map register
DCHECK(kind() != Code::KEYED_STORE_IC ||
map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
- int receiver_count = types->length();
+ int receiver_count = maps->length();
int number_of_handled_maps = 0;
__ LoadP(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int current = 0; current < receiver_count; ++current) {
- Handle<HeapType> type = types->at(current);
- Handle<Map> map = IC::TypeToMap(*type, isolate());
+ Handle<Map> map = maps->at(current);
if (!map->is_deprecated()) {
number_of_handled_maps++;
Handle<WeakCell> cell = Map::WeakCellForMap(map);
__ CmpWeakValue(map_reg, cell, scratch2());
- if (type->Is(HeapType::Number())) {
+ if (map->instance_type() == HEAP_NUMBER_TYPE) {
DCHECK(!number_case.is_unused());
__ bind(&number_case);
}
void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
-#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
- if (RelocInfo::IsInternalReference(rmode_)) {
+ if (IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)) {
// absolute code pointer inside code object moves with the code object.
- Assembler::RelocateInternalReference(pc_, delta, 0, icache_flush_mode);
+ Assembler::RelocateInternalReference(pc_, delta, 0, rmode_,
+ icache_flush_mode);
}
-#endif
- // We do not use pc relative addressing on PPC, so there is
- // nothing else to do.
}
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
-const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;
+const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE |
+ 1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;
bool RelocInfo::IsCodedSpecially() {
const int kEndOfChain = -4;
+// Dummy opcodes for unbound label mov instructions or jump table entries.
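+// They are encoded in the primary opcode field (the "<< 26" shift) and are
+// chosen so they cannot collide with the real branch opcodes (BX, BCX) that
+// may also appear on a label's link chain.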
+enum {
+ kUnboundMovLabelOffsetOpcode = 0 << 26,
+ kUnboundMovLabelAddrOpcode = 1 << 26,
+ kUnboundJumpTableEntryOpcode = 2 << 26
+};
+
+
int Assembler::target_at(int pos) {
Instr instr = instr_at(pos);
// Check which type of branch this is: 16 or 26 bit offset.
int opcode = instr & kOpcodeMask;
- if (BX == opcode) {
- int imm26 = ((instr & kImm26Mask) << 6) >> 6;
- imm26 &= ~(kAAMask | kLKMask); // discard AA|LK bits if present
- if (imm26 == 0) return kEndOfChain;
- return pos + imm26;
- } else if (BCX == opcode) {
- int imm16 = SIGN_EXT_IMM16((instr & kImm16Mask));
- imm16 &= ~(kAAMask | kLKMask); // discard AA|LK bits if present
- if (imm16 == 0) return kEndOfChain;
- return pos + imm16;
- } else if ((instr & ~kImm26Mask) == 0) {
- // Emitted link to a label, not part of a branch (regexp PushBacktrack).
- if (instr == 0) {
- return kEndOfChain;
- } else {
- int32_t imm26 = SIGN_EXT_IMM26(instr);
- return (imm26 + pos);
- }
+ int link;
+ switch (opcode) {
+ case BX:
+ link = SIGN_EXT_IMM26(instr & kImm26Mask);
+ link &= ~(kAAMask | kLKMask); // discard AA|LK bits if present
+ break;
+ case BCX:
+ link = SIGN_EXT_IMM16((instr & kImm16Mask));
+ link &= ~(kAAMask | kLKMask); // discard AA|LK bits if present
+ break;
+ case kUnboundMovLabelOffsetOpcode:
+ case kUnboundMovLabelAddrOpcode:
+ case kUnboundJumpTableEntryOpcode:
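+ // The 26-bit immediate holds an instruction-aligned offset (see the link
+ // encoding in mov_label_offset and friends); scale it back to bytes.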
+ link = SIGN_EXT_IMM26(instr & kImm26Mask);
+ link <<= 2;
+ break;
+ default:
+ DCHECK(false);
+ return -1;
}
- DCHECK(false);
- return -1;
+ if (link == 0) return kEndOfChain;
+ return pos + link;
}
Instr instr = instr_at(pos);
int opcode = instr & kOpcodeMask;
- // check which type of branch this is 16 or 26 bit offset
- if (BX == opcode) {
- int imm26 = target_pos - pos;
- DCHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
- if (imm26 == kInstrSize && !(instr & kLKMask)) {
- // Branch to next instr without link.
- instr = ORI; // nop: ori, 0,0,0
- } else {
- instr &= ((~kImm26Mask) | kAAMask | kLKMask);
- instr |= (imm26 & kImm26Mask);
+ switch (opcode) {
+ case BX: {
+ int imm26 = target_pos - pos;
+ DCHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
+ if (imm26 == kInstrSize && !(instr & kLKMask)) {
+ // Branch to next instr without link.
+ instr = ORI; // nop: ori, 0,0,0
+ } else {
+ instr &= ((~kImm26Mask) | kAAMask | kLKMask);
+ instr |= (imm26 & kImm26Mask);
+ }
+ instr_at_put(pos, instr);
+ break;
}
- instr_at_put(pos, instr);
- return;
- } else if (BCX == opcode) {
- int imm16 = target_pos - pos;
- DCHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
- if (imm16 == kInstrSize && !(instr & kLKMask)) {
- // Branch to next instr without link.
- instr = ORI; // nop: ori, 0,0,0
- } else {
- instr &= ((~kImm16Mask) | kAAMask | kLKMask);
- instr |= (imm16 & kImm16Mask);
+ case BCX: {
+ int imm16 = target_pos - pos;
+ DCHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
+ if (imm16 == kInstrSize && !(instr & kLKMask)) {
+ // Branch to next instr without link.
+ instr = ORI; // nop: ori, 0,0,0
+ } else {
+ instr &= ((~kImm16Mask) | kAAMask | kLKMask);
+ instr |= (imm16 & kImm16Mask);
+ }
+ instr_at_put(pos, instr);
+ break;
}
- instr_at_put(pos, instr);
- return;
- } else if ((instr & ~kImm26Mask) == 0) {
- DCHECK(target_pos == kEndOfChain || target_pos >= 0);
- // Emitted link to a label, not part of a branch (regexp PushBacktrack).
- // Load the position of the label relative to the generated code object
- // pointer in a register.
-
- Register dst = r3; // we assume r3 for now
- DCHECK(IsNop(instr_at(pos + kInstrSize)));
- uint32_t target = target_pos + (Code::kHeaderSize - kHeapObjectTag);
- CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 2,
- CodePatcher::DONT_FLUSH);
- int target_hi = static_cast<int>(target) >> 16;
- int target_lo = static_cast<int>(target) & 0XFFFF;
-
- patcher.masm()->lis(dst, Operand(SIGN_EXT_IMM16(target_hi)));
- patcher.masm()->ori(dst, dst, Operand(target_lo));
- return;
+ case kUnboundMovLabelOffsetOpcode: {
+ // Load the position of the label relative to the generated code object
+ // pointer in a register.
+ Register dst = Register::from_code(instr_at(pos + kInstrSize));
+ int32_t offset = target_pos + (Code::kHeaderSize - kHeapObjectTag);
+ CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 2,
+ CodePatcher::DONT_FLUSH);
+ patcher.masm()->bitwise_mov32(dst, offset);
+ break;
+ }
+ case kUnboundMovLabelAddrOpcode: {
+ // Load the address of the label in a register.
+ Register dst = Register::from_code(instr_at(pos + kInstrSize));
+ intptr_t addr = reinterpret_cast<uintptr_t>(buffer_ + target_pos);
+ CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ kMovInstructionsNoConstantPool,
+ CodePatcher::DONT_FLUSH);
+ AddBoundInternalReferenceLoad(pos);
+ patcher.masm()->bitwise_mov(dst, addr);
+ break;
+ }
+ case kUnboundJumpTableEntryOpcode: {
+ intptr_t addr = reinterpret_cast<uintptr_t>(buffer_ + target_pos);
+ CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ kPointerSize / kInstrSize, CodePatcher::DONT_FLUSH);
+ AddBoundInternalReference(pos);
+ patcher.masm()->emit_ptr(addr);
+ break;
+ }
+ default:
+ DCHECK(false);
+ break;
}
-
- DCHECK(false);
}
int opcode = instr & kOpcodeMask;
// Check which type of branch this is: 16 or 26 bit offset.
- if (BX == opcode) {
- return 26;
- } else if (BCX == opcode) {
- return 16;
- } else if ((instr & ~kImm26Mask) == 0) {
- // Emitted label constant, not part of a branch (regexp PushBacktrack).
- return 26;
+ switch (opcode) {
+ case BX:
+ return 26;
+ case BCX:
+ return 16;
+ case kUnboundMovLabelOffsetOpcode:
+ case kUnboundMovLabelAddrOpcode:
+ case kUnboundJumpTableEntryOpcode:
+ return 0; // no limit on reach
}
DCHECK(false);
int32_t offset = pos - fixup_pos;
int maxReach = max_reach_from(fixup_pos);
next(L); // call next before overwriting link with target at fixup_pos
- if (is_intn(offset, maxReach) == false) {
+ if (maxReach && is_intn(offset, maxReach) == false) {
if (trampoline_pos == kInvalidSlotPos) {
trampoline_pos = get_trampoline_entry();
CHECK(trampoline_pos != kInvalidSlotPos);
}
-int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
- int target_pos;
+int Assembler::link(Label* L) {
+ int position;
if (L->is_bound()) {
- target_pos = L->pos();
+ position = L->pos();
} else {
if (L->is_linked()) {
- target_pos = L->pos(); // L's link
+ position = L->pos(); // L's link
} else {
// was: target_pos = kEndOfChain;
- // However, using branch to self to mark the first reference
+ // However, using the current position to mark the first reference
// should avoid most instances of branch offset overflow. See
// target_at() for where this is converted back to kEndOfChain.
- target_pos = pc_offset();
+ position = pc_offset();
if (!trampoline_emitted_) {
unbound_labels_count_++;
next_buffer_check_ -= kTrampolineSlotsSize;
L->link_to(pc_offset());
}
- return target_pos - pc_offset();
+ return position;
}
// TOC and static chain are ignored and set to 0.
void Assembler::function_descriptor() {
#if ABI_USES_FUNCTION_DESCRIPTORS
+ Label instructions;
DCHECK(pc_offset() == 0);
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
- emit_ptr(reinterpret_cast<uintptr_t>(pc_) + 3 * kPointerSize);
+ emit_label_addr(&instructions);
emit_ptr(0);
emit_ptr(0);
+ bind(&instructions);
#endif
}
-#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
void Assembler::RelocateInternalReference(Address pc, intptr_t delta,
Address code_start,
+ RelocInfo::Mode rmode,
ICacheFlushMode icache_flush_mode) {
- DCHECK(delta || code_start);
-#if ABI_USES_FUNCTION_DESCRIPTORS
- uintptr_t* fd = reinterpret_cast<uintptr_t*>(pc);
- if (fd[1] == 0 && fd[2] == 0) {
- // Function descriptor
+ if (RelocInfo::IsInternalReference(rmode)) {
+ // Jump table entry
+ DCHECK(delta || code_start);
+ uintptr_t* entry = reinterpret_cast<uintptr_t*>(pc);
if (delta) {
- fd[0] += delta;
+ *entry += delta;
} else {
- fd[0] = reinterpret_cast<uintptr_t>(code_start) + 3 * kPointerSize;
+ // remove when serializer properly supports internal references
+ *entry = reinterpret_cast<uintptr_t>(code_start) + 3 * kPointerSize;
}
- return;
- }
-#endif
-#if V8_OOL_CONSTANT_POOL
- // mov for LoadConstantPoolPointerRegister
- ConstantPoolArray* constant_pool = NULL;
- if (delta) {
- code_start = target_address_at(pc, constant_pool) + delta;
- }
- set_target_address_at(pc, constant_pool, code_start, icache_flush_mode);
-#endif
-}
-
-
-int Assembler::DecodeInternalReference(Vector<char> buffer, Address pc) {
-#if ABI_USES_FUNCTION_DESCRIPTORS
- uintptr_t* fd = reinterpret_cast<uintptr_t*>(pc);
- if (fd[1] == 0 && fd[2] == 0) {
- // Function descriptor
- SNPrintF(buffer, "[%08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
- "]"
- " function descriptor",
- fd[0], fd[1], fd[2]);
- return kPointerSize * 3;
+ } else {
+ // mov sequence
+ DCHECK(delta || code_start);
+ DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
+ ConstantPoolArray* constant_pool = NULL;
+ Address addr;
+ if (delta) {
+ addr = target_address_at(pc, constant_pool) + delta;
+ } else {
+ // remove when serializer properly supports internal references
+ addr = code_start;
+ }
+ set_target_address_at(pc, constant_pool, addr, icache_flush_mode);
}
-#endif
- return 0;
}
-#endif
int Assembler::instructions_required_for_mov(const Operand& x) const {
}
DCHECK(!canOptimize);
+ bitwise_mov(dst, value);
+}
- {
+
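+// Emit the full fixed-length mov sequence regardless of the value, so the
+// resulting code has a predictable size and can be patched in place.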
+void Assembler::bitwise_mov(Register dst, intptr_t value) {
BlockTrampolinePoolScope block_trampoline_pool(this);
#if V8_TARGET_ARCH_PPC64
int32_t hi_32 = static_cast<int32_t>(value >> 32);
lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
ori(dst, dst, Operand(lo_word));
#endif
- }
+}
+
+
+void Assembler::bitwise_mov32(Register dst, int32_t value) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ int hi_word = static_cast<int>(value >> 16);
+ int lo_word = static_cast<int>(value & 0xffff);
+ lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
+ ori(dst, dst, Operand(lo_word));
}
void Assembler::mov_label_offset(Register dst, Label* label) {
+ int position = link(label);
if (label->is_bound()) {
- int target = label->pos();
- mov(dst, Operand(target + Code::kHeaderSize - kHeapObjectTag));
+ // Load the position of the label relative to the generated code object.
+ mov(dst, Operand(position + Code::kHeaderSize - kHeapObjectTag));
} else {
- bool is_linked = label->is_linked();
- // Emit the link to the label in the code stream followed by extra
- // nop instructions.
- DCHECK(dst.is(r3)); // target_at_put assumes r3 for now
- int link = is_linked ? label->pos() - pc_offset() : 0;
- label->link_to(pc_offset());
-
- if (!is_linked && !trampoline_emitted_) {
- unbound_labels_count_++;
- next_buffer_check_ -= kTrampolineSlotsSize;
- }
+ // Encode internal reference to unbound label. We use a dummy opcode
+ // such that it won't collide with any opcode that might appear in the
+ // label's chain. Encode the destination register in the 2nd instruction.
+ int link = position - pc_offset();
+ DCHECK_EQ(0, link & 3);
+ link >>= 2;
+ DCHECK(is_int26(link));
// When the label is bound, these instructions will be patched
// with a 2 instruction mov sequence that will load the
// destination register with the position of the label from the
// beginning of the code.
//
- // When the label gets bound: target_at extracts the link and
- // target_at_put patches the instructions.
+ // target_at extracts the link and target_at_put patches the instructions.
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ emit(kUnboundMovLabelOffsetOpcode | (link & kImm26Mask));
+ emit(dst.code());
+ }
+}
+
+
+// TODO(mbrandy): allow loading internal reference from constant pool
+void Assembler::mov_label_addr(Register dst, Label* label) {
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ int position = link(label);
+ if (label->is_bound()) {
+// CheckBuffer() is called too frequently. This will pre-grow
+// the buffer if needed to avoid splitting the relocation and instructions.
+#if V8_OOL_CONSTANT_POOL
+ EnsureSpaceFor(kMovInstructionsNoConstantPool * kInstrSize);
+#endif
+
+ intptr_t addr = reinterpret_cast<uintptr_t>(buffer_ + position);
+ AddBoundInternalReferenceLoad(pc_offset());
+ bitwise_mov(dst, addr);
+ } else {
+ // Encode internal reference to unbound label. We use a dummy opcode
+ // such that it won't collide with any opcode that might appear in the
+ // label's chain. Encode the destination register in the 2nd instruction.
+ int link = position - pc_offset();
+ DCHECK_EQ(0, link & 3);
+ link >>= 2;
+ DCHECK(is_int26(link));
+
+ // When the label is bound, these instructions will be patched
+ // with a multi-instruction mov sequence that will load the
+ // destination register with the address of the label.
+ //
+ // target_at extracts the link and target_at_put patches the instructions.
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ emit(kUnboundMovLabelAddrOpcode | (link & kImm26Mask));
+ emit(dst.code());
+ DCHECK(kMovInstructionsNoConstantPool >= 2);
+ for (int i = 0; i < kMovInstructionsNoConstantPool - 2; i++) nop();
+ }
+}
+
+
+void Assembler::emit_label_addr(Label* label) {
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ int position = link(label);
+ if (label->is_bound()) {
+// CheckBuffer() is called too frequently. This will pre-grow
+// the buffer if needed to avoid splitting the relocation and entry.
+#if V8_OOL_CONSTANT_POOL
+ EnsureSpaceFor(kPointerSize);
+#endif
+
+ intptr_t addr = reinterpret_cast<uintptr_t>(buffer_ + position);
+ AddBoundInternalReference(pc_offset());
+ emit_ptr(addr);
+ } else {
+ // Encode internal reference to unbound label. We use a dummy opcode
+ // such that it won't collide with any opcode that might appear in the
+ // label's chain.
+ int link = position - pc_offset();
+ DCHECK_EQ(0, link & 3);
+ link >>= 2;
+ DCHECK(is_int26(link));
+
+ // When the label is bound, the instruction(s) will be patched
+ // as a jump table entry containing the label address. target_at extracts
+ // the link and target_at_put patches the instruction(s).
BlockTrampolinePoolScope block_trampoline_pool(this);
- emit(link);
+ emit(kUnboundJumpTableEntryOpcode | (link & kImm26Mask));
+#if V8_TARGET_ARCH_PPC64
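+ // On 64-bit targets the bound entry is a full pointer (two instruction
+ // slots), so pad with a nop.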
nop();
+#endif
}
}
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
-// None of our relocation types are pc relative pointing outside the code
-// buffer nor pc absolute pointing inside the code buffer, so there is no need
-// to relocate any emitted relocation entries.
-
-#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
- // Relocate runtime entries.
- for (RelocIterator it(desc); !it.done(); it.next()) {
- RelocInfo::Mode rmode = it.rinfo()->rmode();
- if (rmode == RelocInfo::INTERNAL_REFERENCE) {
- RelocateInternalReference(it.rinfo()->pc(), pc_delta, 0);
- }
+ // Relocate internal references (jump table entries and mov-sequence loads).
+ for (int pos : internal_reference_positions_) {
+ RelocateInternalReference(buffer_ + pos, pc_delta, 0,
+ RelocInfo::INTERNAL_REFERENCE);
+ }
+ for (int pos : internal_reference_load_positions_) {
+ RelocateInternalReference(buffer_ + pos, pc_delta, 0,
+ RelocInfo::INTERNAL_REFERENCE_ENCODED);
}
#if V8_OOL_CONSTANT_POOL
constant_pool_builder_.Relocate(pc_delta);
#endif
-#endif
}
}
-void Assembler::emit_ptr(uintptr_t data) {
+void Assembler::emit_ptr(intptr_t data) {
CheckBuffer();
*reinterpret_cast<uintptr_t*>(pc_) = data;
pc_ += sizeof(uintptr_t);
// but it may be bound only once.
void bind(Label* L); // binds an unbound label L to the current code position
+
+ // Links a label at the current pc_offset(). If already bound, returns the
+ // bound position. If already linked, returns the position of the prior link.
+ // Otherwise, returns the current pc_offset().
+ int link(Label* L);
+
// Determines if Label is bound and near enough so that a single
// branch instruction can be used to reach it.
bool is_near(Label* L, Condition cond);
// Returns the branch offset to the given label from the current code position
// Links the label to the current position if it is still unbound
// Manages the jump elimination optimization if the second parameter is true.
- int branch_offset(Label* L, bool jump_elimination_allowed);
+ int branch_offset(Label* L, bool jump_elimination_allowed) {
+ int position = link(L);
+ return position - pc_offset();
+ }
// Puts a label's target address at the given position.
// The high 8 bits are set to zero.
void cmplw(Register src1, Register src2, CRegister cr = cr7);
void mov(Register dst, const Operand& src);
+ void bitwise_mov(Register dst, intptr_t value);
+ void bitwise_mov32(Register dst, int32_t value);
// Load the position of the label relative to the generated code object
// pointer in a register.
void mov_label_offset(Register dst, Label* label);
+ // Load the address of the label in a register and associate with an
+ // internal reference relocation.
+ void mov_label_addr(Register dst, Label* label);
+
+ // Emit the address of the label (i.e. a jump table entry) and associate with
+ // an internal reference relocation.
+ void emit_label_addr(Label* label);
+
// Multiply instructions
void mul(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
RCBit r = LeaveRC);
// for inline tables, e.g., jump-tables.
void db(uint8_t data);
void dd(uint32_t data);
- void emit_ptr(uintptr_t data);
+ void emit_ptr(intptr_t data);
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
}
#endif
-#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
static void RelocateInternalReference(
- Address pc, intptr_t delta, Address code_start,
+ Address pc, intptr_t delta, Address code_start, RelocInfo::Mode rmode,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- static int DecodeInternalReference(Vector<char> buffer, Address pc);
-#endif
+
+ void AddBoundInternalReference(int position) {
+ internal_reference_positions_.push_back(position);
+ }
+
+ void AddBoundInternalReferenceLoad(int position) {
+ internal_reference_load_positions_.push_back(position);
+ }
protected:
// Relocation for a type-recording IC has the AST id added to it. This
static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
RelocInfoWriter reloc_info_writer;
+ // Internal reference positions, required for (potential) patching in
+ // GrowBuffer(); contains only those internal references whose labels
+ // are already bound.
+ std::deque<int> internal_reference_positions_;
+ std::deque<int> internal_reference_load_positions_;
+
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
// receiver is the hole.
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ Push(r7, ip);
+
+ // smi arguments count, new.target, receiver
+ __ Push(r7, r6, ip);
// Set up pointer to last argument.
__ addi(r5, fp, Operand(StandardFrameConstants::kCallerSPOffset));
// r7: number of arguments (smi-tagged)
// cr0: compare against zero of arguments
// sp[0]: receiver
- // sp[1]: number of arguments (smi-tagged)
+ // sp[1]: new.target
+ // sp[2]: number of arguments (smi-tagged)
Label loop, no_args;
__ beq(&no_args, cr0);
__ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
__ bdnz(&loop);
__ bind(&no_args);
+ __ addi(r3, r3, Operand(1));
+
+ // Handle step in.
+ Label skip_step_in;
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(masm->isolate());
+ __ mov(r5, Operand(debug_step_in_fp));
+ __ LoadP(r5, MemOperand(r5));
+ __ and_(r0, r5, r5, SetRC);
+ __ beq(&skip_step_in, cr0);
+
+ __ Push(r3, r4, r4);
+ __ CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1);
+ __ Pop(r3, r4);
+
+ __ bind(&skip_step_in);
+
// Call the function.
// r3: number of arguments
// r4: constructor function
// know where the return address is. The CEntryStub is unmovable, so
// we can store the address on the stack to be able to find it again and
// we never have to restore it, because it will not change.
- // Compute the return address in lr to return to after the jump below. Pc is
- // already at '+ 8' from the current instruction but return is after three
- // instructions so add another 4 to pc to get the return address.
- {
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
- Label here;
- __ b(&here, SetLK);
- __ bind(&here);
- __ mflr(r8);
-
- // Constant used below is dependent on size of Call() macro instructions
- __ addi(r0, r8, Operand(20));
-
- __ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
- __ Call(target);
- }
+ Label after_call;
+ __ mov_label_addr(r0, &after_call);
+ __ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
+ __ Call(target);
+ __ bind(&after_call);
#if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
// If return value is on the stack, pop it to registers.
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ CHECK(!has_new_target());
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
const int kDisplacement =
// sp[1] : receiver displacement
// sp[2] : function
+ CHECK(!has_new_target());
+
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
__ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
// r9 : allocated object (tagged)
// r11 : mapped parameter count (tagged)
+ CHECK(!has_new_target());
+
__ LoadP(r4, MemOperand(sp, 0 * kPointerSize));
// r4 = parameter count (tagged)
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
__ LoadP(r4, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ if (has_new_target()) {
+ // Subtract 1 from smi-tagged arguments count.
+ __ SubSmiLiteral(r4, r4, Smi::FromInt(1), r0);
+ }
__ StoreP(r4, MemOperand(sp, 0));
__ SmiToPtrArrayOffset(r6, r4);
__ add(r6, r5, r6);
}
+void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
+ // Stack layout on entry.
+ // sp[0] : index of rest parameter
+ // sp[4] : number of parameters
+ // sp[8] : receiver displacement
+
+ Label runtime;
+ __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ bne(&runtime);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ LoadP(r4, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ StoreP(r4, MemOperand(sp, 1 * kPointerSize));
+ __ SmiToPtrArrayOffset(r6, r4);
+ __ add(r6, r5, r6);
+ __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ StoreP(r6, MemOperand(sp, 2 * kPointerSize));
+
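+ // Both paths fall through to the runtime, which performs the allocation;
+ // the adaptor path just patches the argument count and parameter pointer.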
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewRestParam, 3, 1);
+}
+
+
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
}
// Pass function as original constructor.
- __ mr(r6, r4);
+ if (IsSuperConstructorCall()) {
+ __ ShiftLeftImm(r7, r3, Operand(kPointerSizeLog2));
+ __ addi(r7, r7, Operand(kPointerSize));
+ __ LoadPX(r6, MemOperand(sp, r7));
+ } else {
+ __ mr(r6, r4);
+ }
// Jump to the function-specific construct stub.
Register jmp_reg = r7;
}
}
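+ // Save the current frame pointer in the isolate's c_entry_fp slot.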
+ __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ __ StoreP(fp, MemOperand(ip));
+
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%08x ",
instr->InstructionBits());
+#if ABI_USES_FUNCTION_DESCRIPTORS
+ // The first field will be identified as a jump table entry. We emit the rest
+ // of the structure as zero, so just skip past them.
+ if (instr->InstructionBits() == 0) {
+ Format(instr, "constant");
+ return Instruction::kInstrSize;
+ }
+#endif
+
switch (instr->OpcodeValue() << 26) {
case TWI: {
PrintSoftwareInterrupt(instr->SvcValue());
bool need_write_barrier = true;
if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
__ push(r4);
- __ Push(info->scope()->GetScopeInfo());
+ __ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), heap_slots);
}
}
+ // Possibly allocate RestParameters
+ int rest_index;
+ Variable* rest_param = scope()->rest_parameter(&rest_index);
+ if (rest_param) {
+ Comment cmnt(masm_, "[ Allocate rest parameter array");
+
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
+ __ addi(r6, fp, Operand(StandardFrameConstants::kCallerSPOffset + offset));
+ __ mov(r5, Operand(Smi::FromInt(num_parameters)));
+ __ mov(r4, Operand(Smi::FromInt(rest_index)));
+ __ Push(r6, r5, r4);
+
+ RestParamAccessStub stub(isolate());
+ __ CallStub(&stub);
+
+ SetVar(rest_param, r3, r4, r5);
+ }
+
Variable* arguments = scope()->arguments();
if (arguments != NULL) {
// Function uses arguments object.
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
+ ArgumentsAccessStub::HasNewTarget has_new_target =
+ IsSubclassConstructor(info->function()->kind())
+ ? ArgumentsAccessStub::HAS_NEW_TARGET
+ : ArgumentsAccessStub::NO_NEW_TARGET;
ArgumentsAccessStub::Type type;
- if (is_strict(language_mode())) {
+ if (is_strict(language_mode()) || !is_simple_parameter_list()) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
} else {
type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
- ArgumentsAccessStub stub(isolate(), type);
+ ArgumentsAccessStub stub(isolate(), type, has_new_target);
__ CallStub(&stub);
SetVar(arguments, r3, r4, r5);
// sequence.
{
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
+ int32_t arg_count = info_->scope()->num_parameters() + 1;
+ if (IsSubclassConstructor(info_->function()->kind())) {
+ arg_count++;
+ }
+ int32_t sp_delta = arg_count * kPointerSize;
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
__ RecordJSReturn();
int no_frame_start = __ LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
}
-void FullCodeGenerator::EmitLoadSuperConstructor(SuperReference* super_ref) {
- DCHECK(super_ref != NULL);
+void FullCodeGenerator::EmitLoadSuperConstructor() {
__ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Push(r3);
__ CallRuntime(Runtime::kGetPrototype, 1);
void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
- SuperReference* super_ref = expr->expression()->AsSuperReference();
- EmitLoadSuperConstructor(super_ref);
- __ push(result_register());
-
- Variable* this_var = super_ref->this_var()->var();
+ if (!ValidateSuperCall(expr)) return;
+ Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
+ GetVar(result_register(), new_target_var);
+ __ Push(result_register());
- GetVar(r3, this_var);
- __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
- Label uninitialized_this;
- __ beq(&uninitialized_this);
- __ mov(r3, Operand(this_var->name()));
- __ push(r3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ bind(&uninitialized_this);
+ EmitLoadSuperConstructor();
+ __ push(result_register());
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
__ Move(r5, FeedbackVector());
__ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackSlot()));
- // TODO(dslomov): use a different stub and propagate new.target.
- CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
+ CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ Drop(1);
+
RecordJSReturnSite(expr);
+ SuperReference* super_ref = expr->expression()->AsSuperReference();
+ Variable* this_var = super_ref->this_var()->var();
+ GetVar(r4, this_var);
+ __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
+ Label uninitialized_this;
+ __ beq(&uninitialized_this);
+ __ mov(r4, Operand(this_var->name()));
+ __ push(r4);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&uninitialized_this);
+
EmitVariableAssignment(this_var, Token::INIT_CONST);
context()->Plug(r3);
}
}
+void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
+ Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
+ GetVar(result_register(), new_target_var);
+ __ Push(result_register());
+
+ EmitLoadSuperConstructor();
+ __ mr(r4, result_register());
+ __ Push(r4);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, args_set_up, runtime;
+ __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ beq(&adaptor_frame);
+
+ // default constructor has no arguments, so no adaptor frame means no args.
+ __ li(r3, Operand::Zero());
+ __ b(&args_set_up);
+
+ // Copy arguments from adaptor frame.
+ {
+ __ bind(&adaptor_frame);
+ __ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(r3);
+
+ // Subtract 1 from arguments count, for new.target.
+ __ subi(r3, r3, Operand(1));
+
+ // Get arguments pointer in r5.
+ __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
+ __ add(r5, r5, r0);
+ __ addi(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ Label loop;
+ __ mtctr(r3);
+ __ bind(&loop);
+ // Pre-decrement in order to skip receiver.
+ __ LoadPU(r6, MemOperand(r5, -kPointerSize));
+ __ Push(r6);
+ __ bdnz(&loop);
+ }
+
+ __ bind(&args_set_up);
+ CallConstructStub stub(isolate(), SUPER_CONSTRUCTOR_CALL);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+
+ __ Drop(1);
+
+ context()->Plug(result_register());
+}
+
+
void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
RegExpConstructResultStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
} else {
DCHECK(access_method == CONSTRUCT_INTERNAL_REFERENCE);
base = kConstantPoolRegister;
- ConstantPoolUnavailableScope constant_pool_unavailable(this);
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
// CheckBuffer() is called too frequently. This will pre-grow
// the buffer if needed to avoid splitting the relocation and instructions.
EnsureSpaceFor(kMovInstructionsNoConstantPool * kInstrSize);
- uintptr_t code_start = reinterpret_cast<uintptr_t>(pc_) - pc_offset();
- mov(base, Operand(code_start, RelocInfo::INTERNAL_REFERENCE));
+ intptr_t code_start = reinterpret_cast<intptr_t>(pc_) - pc_offset();
+ AddBoundInternalReferenceLoad(pc_offset());
+ bitwise_mov(base, code_start);
}
LoadP(kConstantPoolRegister, MemOperand(base, constant_pool_offset));
}
DCHECK(space_number != CODE_SPACE);
}
#endif
-#if V8_TARGET_ARCH_PPC && \
- (ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL)
+#if V8_TARGET_ARCH_PPC
// If we're on a platform that uses function descriptors
// these jump tables make use of RelocInfo::INTERNAL_REFERENCE.
// As the V8 serialization code doesn't handle that relocation type
Code* code = reinterpret_cast<Code*>(HeapObject::FromAddress(address));
for (RelocIterator it(code); !it.done(); it.next()) {
RelocInfo::Mode rmode = it.rinfo()->rmode();
- if (rmode == RelocInfo::INTERNAL_REFERENCE) {
+ if (RelocInfo::IsInternalReference(rmode) ||
+ RelocInfo::IsInternalReferenceEncoded(rmode)) {
Assembler::RelocateInternalReference(it.rinfo()->pc(), 0,
- code->instruction_start());
+ code->instruction_start(), rmode);
}
}
}