From 40af47d46abe7b26119b0d70d4c48726688820b7 Mon Sep 17 00:00:00 2001
From: "rmcilroy@chromium.org" <rmcilroy@chromium.org>
Date: Thu, 3 Jul 2014 17:01:14 +0000
Subject: [PATCH] [Arm]: Enable use of extended out-of-line constant pool for Arm.

- Add support to the Arm assembler to use extended constant pools.
- Update (set_)target_address_at to support extended constant pool load
  updates.
- Replace Operand::is_single_instruction with Operand::instructions_required,
  since different constant pool load types require different numbers of
  instructions.
- Various cleanups of ConstantPoolBuilder for cleaner integration of the
  extended constant pool building.
- Update GetRelocatedValue such that the offsets to both map_check and
  bool_load are explicitly provided, rather than the location of bool_load
  being inferred from map_check, since the code in between is no longer of a
  predictable size.
- Update MacroAssembler::GetRelocatedValueLocation() to add support for
  getting a value from an extended constant pool entry.
- Update Debug::SetAfterBreakTarget() to use
  target_address_from_return_address when checking for debug breaks at
  constant pool load points.
- Change ConstantPoolIterateBody to iterate over both heap and code pointers
  in the small section before moving on to the extended section, to work
  around the serializer's requirement that pointers are iterated in order.
- Increase old_pointer_space SizeOfFirstPage() to offset the fact that
  constant pools are now in the old pointer space (rather than code space).

R=ulan@chromium.org

Review URL: https://codereview.chromium.org/356393003

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22209 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---
 src/arm/assembler-arm-inl.h    |  75 +++++----
 src/arm/assembler-arm.cc       | 339 ++++++++++++++++++++++++-----------------
 src/arm/assembler-arm.h        |  94 +++++++-----
 src/arm/code-stubs-arm.cc      |  32 ++--
 src/arm/constants-arm.h        |   1 +
 src/arm/full-codegen-arm.cc    |  44 +++++-
 src/arm/lithium-codegen-arm.cc |  59 ++++---
 src/arm/lithium-codegen-arm.h  |   2 +-
 src/arm/macro-assembler-arm.cc |  74 ++++++---
 src/arm/macro-assembler-arm.h  |  19 ++-
 src/debug.cc                   |  37 ++---
 src/objects-inl.h              | 101 +++++++++---
 src/objects.cc                 |  26 ++--
 src/objects.h                  |  57 ++++---
 src/spaces.cc                  |   2 +-
 15 files changed, 622 insertions(+), 340 deletions(-)

diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index 2c44c78..f5b9015 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -429,8 +429,15 @@ Address Assembler::target_address_from_return_address(Address pc) {
   //   movt ip, #...      @ call address high 16
   //   blx ip
   //                      @ return address
-  // Or pre-V7 or cases that need frequent patching:
-  //   ldr ip, [pc, #...] @ call address
+  // Or pre-V7 or cases that need frequent patching, the address is in the
+  // constant pool. It could be a small constant pool load:
+  //   ldr ip, [pc / pp, #...] @ call address
+  //   blx ip
+  //                      @ return address
+  // Or an extended constant pool load:
+  //   movw ip, #...
+  //   movt ip, #...
+ // ldr ip, [pc, ip] @ call address // blx ip // @ return address Address candidate = pc - 2 * Assembler::kInstrSize; @@ -438,22 +445,35 @@ Address Assembler::target_address_from_return_address(Address pc) { if (IsLdrPcImmediateOffset(candidate_instr) | IsLdrPpImmediateOffset(candidate_instr)) { return candidate; + } else if (IsLdrPpRegOffset(candidate_instr)) { + candidate = pc - 4 * Assembler::kInstrSize; + ASSERT(IsMovW(Memory::int32_at(candidate)) && + IsMovT(Memory::int32_at(candidate + Assembler::kInstrSize))); + return candidate; + } else { + candidate = pc - 3 * Assembler::kInstrSize; + ASSERT(IsMovW(Memory::int32_at(candidate)) && + IsMovT(Memory::int32_at(candidate + kInstrSize))); + return candidate; } - candidate = pc - 3 * Assembler::kInstrSize; - ASSERT(IsMovW(Memory::int32_at(candidate)) && - IsMovT(Memory::int32_at(candidate + kInstrSize))); - return candidate; } Address Assembler::return_address_from_call_start(Address pc) { if (IsLdrPcImmediateOffset(Memory::int32_at(pc)) | IsLdrPpImmediateOffset(Memory::int32_at(pc))) { + // Load from constant pool, small section. return pc + kInstrSize * 2; } else { ASSERT(IsMovW(Memory::int32_at(pc))); ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize))); - return pc + kInstrSize * 3; + if (IsLdrPpRegOffset(Memory::int32_at(pc + kInstrSize))) { + // Load from constant pool, extended section. + return pc + kInstrSize * 4; + } else { + // A movw / movt load immediate. + return pc + kInstrSize * 3; + } } } @@ -468,20 +488,11 @@ void Assembler::deserialization_set_special_target_at( } -static Instr EncodeMovwImmediate(uint32_t immediate) { - ASSERT(immediate < 0x10000); - return ((immediate & 0xf000) << 4) | (immediate & 0xfff); -} - - -static Instr PatchMovwImmediate(Instr instruction, uint32_t immediate) { - instruction &= ~EncodeMovwImmediate(0xffff); - return instruction | EncodeMovwImmediate(immediate); -} - - -static bool IsConstantPoolLoad(Address pc) { - return !Assembler::IsMovW(Memory::int32_at(pc)); +bool Assembler::is_constant_pool_load(Address pc) { + return !Assembler::IsMovW(Memory::int32_at(pc)) || + (FLAG_enable_ool_constant_pool && + Assembler::IsLdrPpRegOffset( + Memory::int32_at(pc + 2 * Assembler::kInstrSize))); } @@ -489,9 +500,21 @@ Address Assembler::constant_pool_entry_address( Address pc, ConstantPoolArray* constant_pool) { if (FLAG_enable_ool_constant_pool) { ASSERT(constant_pool != NULL); - ASSERT(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc))); - return reinterpret_cast
(constant_pool) + - GetLdrRegisterImmediateOffset(Memory::int32_at(pc)); + int cp_offset; + if (IsMovW(Memory::int32_at(pc))) { + ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)) && + IsLdrPpRegOffset(Memory::int32_at(pc + 2 * kInstrSize))); + // This is an extended constant pool lookup. + Instruction* movw_instr = Instruction::At(pc); + Instruction* movt_instr = Instruction::At(pc + kInstrSize); + cp_offset = (movt_instr->ImmedMovwMovtValue() << 16) | + movw_instr->ImmedMovwMovtValue(); + } else { + // This is a small constant pool lookup. + ASSERT(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc))); + cp_offset = GetLdrRegisterImmediateOffset(Memory::int32_at(pc)); + } + return reinterpret_cast
(constant_pool) + cp_offset; } else { ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc))); Instr instr = Memory::int32_at(pc); @@ -502,7 +525,7 @@ Address Assembler::constant_pool_entry_address( Address Assembler::target_address_at(Address pc, ConstantPoolArray* constant_pool) { - if (IsConstantPoolLoad(pc)) { + if (is_constant_pool_load(pc)) { // This is a constant pool lookup. Return the value in the constant pool. return Memory::Address_at(constant_pool_entry_address(pc, constant_pool)); } else { @@ -522,7 +545,7 @@ void Assembler::set_target_address_at(Address pc, ConstantPoolArray* constant_pool, Address target, ICacheFlushMode icache_flush_mode) { - if (IsConstantPoolLoad(pc)) { + if (is_constant_pool_load(pc)) { // This is a constant pool lookup. Update the entry in the constant pool. Memory::Address_at(constant_pool_entry_address(pc, constant_pool)) = target; // Intuitively, we would think it is necessary to always flush the diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc index da11435..a441bc3 100644 --- a/src/arm/assembler-arm.cc +++ b/src/arm/assembler-arm.cc @@ -231,11 +231,7 @@ bool RelocInfo::IsCodedSpecially() { bool RelocInfo::IsInConstantPool() { - if (FLAG_enable_ool_constant_pool) { - return Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc_)); - } else { - return Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)); - } + return Assembler::is_constant_pool_load(pc_); } @@ -416,6 +412,9 @@ const Instr kLdrPCImmedPattern = 5 * B24 | L | kRegister_pc_Code * B16; // ldr rd, [pp, #offset] const Instr kLdrPpImmedMask = 15 * B24 | 7 * B20 | 15 * B16; const Instr kLdrPpImmedPattern = 5 * B24 | L | kRegister_r8_Code * B16; +// ldr rd, [pp, rn] +const Instr kLdrPpRegMask = 15 * B24 | 7 * B20 | 15 * B16; +const Instr kLdrPpRegPattern = 7 * B24 | L | kRegister_r8_Code * B16; // vldr dd, [pc, #offset] const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8; const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8; @@ -433,6 +432,8 @@ const Instr kMovMvnPattern = 0xd * B21; const Instr kMovMvnFlip = B22; const Instr kMovLeaveCCMask = 0xdff * B16; const Instr kMovLeaveCCPattern = 0x1a0 * B16; +const Instr kMovwPattern = 0x30 * B20; +const Instr kMovtPattern = 0x34 * B20; const Instr kMovwLeaveCCFlip = 0x5 * B21; const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12; const Instr kCmpCmnPattern = 0x15 * B20; @@ -467,7 +468,6 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size) first_const_pool_64_use_ = -1; last_bound_pos_ = 0; constant_pool_available_ = !FLAG_enable_ool_constant_pool; - constant_pool_full_ = false; ClearRecordedAstId(); } @@ -638,6 +638,15 @@ Instr Assembler::GetConsantPoolLoadPattern() { } +Instr Assembler::GetConsantPoolLoadMask() { + if (FLAG_enable_ool_constant_pool) { + return kLdrPpImmedMask; + } else { + return kLdrPCImmedMask; + } +} + + bool Assembler::IsPush(Instr instr) { return ((instr & ~kRdMask) == kPushRegPattern); } @@ -682,6 +691,16 @@ bool Assembler::IsLdrPpImmediateOffset(Instr instr) { } +bool Assembler::IsLdrPpRegOffset(Instr instr) { + // Check the instruction is indeed a + // ldr , [pp, +/- ]. + return (instr & kLdrPpRegMask) == kLdrPpRegPattern; +} + + +Instr Assembler::GetLdrPpRegOffsetPattern() { return kLdrPpRegPattern; } + + bool Assembler::IsVldrDPcImmediateOffset(Instr instr) { // Check the instruction is indeed a // vldr
, [pc +/- offset_10]. @@ -983,7 +1002,7 @@ static bool fits_shifter(uint32_t imm32, if (CpuFeatures::IsSupported(ARMv7)) { if (imm32 < 0x10000) { *instr ^= kMovwLeaveCCFlip; - *instr |= EncodeMovwImmediate(imm32); + *instr |= Assembler::EncodeMovwImmediate(imm32); *rotate_imm = *immed_8 = 0; // Not used for movw. return true; } @@ -1032,7 +1051,7 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const { static bool use_mov_immediate_load(const Operand& x, const Assembler* assembler) { - if (assembler != NULL && !assembler->can_use_constant_pool()) { + if (assembler != NULL && !assembler->is_constant_pool_available()) { // If there is no constant pool available, we must use an mov immediate. // TODO(rmcilroy): enable ARMv6 support. ASSERT(CpuFeatures::IsSupported(ARMv7)); @@ -1051,28 +1070,35 @@ static bool use_mov_immediate_load(const Operand& x, } -bool Operand::is_single_instruction(const Assembler* assembler, - Instr instr) const { - if (rm_.is_valid()) return true; +int Operand::instructions_required(const Assembler* assembler, + Instr instr) const { + if (rm_.is_valid()) return 1; uint32_t dummy1, dummy2; if (must_output_reloc_info(assembler) || !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) { // The immediate operand cannot be encoded as a shifter operand, or use of - // constant pool is required. For a mov instruction not setting the - // condition code additional instruction conventions can be used. - if ((instr & ~kCondMask) == 13*B21) { // mov, S not set - return !use_mov_immediate_load(*this, assembler); + // constant pool is required. First account for the instructions required + // for the constant pool or immediate load + int instructions; + if (use_mov_immediate_load(*this, assembler)) { + instructions = 2; // A movw, movt immediate load. + } else if (assembler != NULL && assembler->use_extended_constant_pool()) { + instructions = 3; // An extended constant pool load. } else { - // If this is not a mov or mvn instruction there will always an additional - // instructions - either mov or ldr. The mov might actually be two - // instructions mov or movw followed by movt so including the actual - // instruction two or three instructions will be generated. - return false; + instructions = 1; // A small constant pool load. } + + if ((instr & ~kCondMask) != 13 * B21) { // mov, S not set + // For a mov or mvn instruction which doesn't set the condition + // code, the constant pool or immediate load is enough, otherwise we need + // to account for the actual instruction being requested. + instructions += 1; + } + return instructions; } else { // No use of constant pool and the immediate operand can be encoded as a // shifter operand. - return true; + return 1; } } @@ -1100,9 +1126,20 @@ void Assembler::move_32_bit_immediate(Register rd, mov(rd, target, LeaveCC, cond); } } else { - ASSERT(can_use_constant_pool()); - ConstantPoolAddEntry(rinfo); - ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond); + ASSERT(is_constant_pool_available()); + ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo); + if (section == ConstantPoolArray::EXTENDED_SECTION) { + ASSERT(FLAG_enable_ool_constant_pool); + Register target = rd.code() == pc.code() ? ip : rd; + // Emit instructions to load constant pool offset. + movw(target, 0, cond); + movt(target, 0, cond); + // Load from constant pool at offset. 
+ ldr(rd, MemOperand(pp, target), cond); + } else { + ASSERT(section == ConstantPoolArray::SMALL_SECTION); + ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond); + } } } @@ -2406,7 +2443,7 @@ void Assembler::vmov(const DwVfpRegister dst, int vd, d; dst.split_code(&vd, &d); emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc); - } else if (FLAG_enable_vldr_imm && can_use_constant_pool()) { + } else if (FLAG_enable_vldr_imm && is_constant_pool_available()) { // TODO(jfb) Temporarily turned off until we have constant blinding or // some equivalent mitigation: an attacker can otherwise control // generated data which also happens to be executable, a Very Bad @@ -2423,8 +2460,18 @@ void Assembler::vmov(const DwVfpRegister dst, // that's tricky because vldr has a limited reach. Furthermore // it breaks load locality. RelocInfo rinfo(pc_, imm); - ConstantPoolAddEntry(rinfo); - vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0)); + ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo); + if (section == ConstantPoolArray::EXTENDED_SECTION) { + ASSERT(FLAG_enable_ool_constant_pool); + // Emit instructions to load constant pool offset. + movw(ip, 0); + movt(ip, 0); + // Load from constant pool at offset. + vldr(dst, MemOperand(pp, ip)); + } else { + ASSERT(section == ConstantPoolArray::SMALL_SECTION); + vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0)); + } } else { // Synthesise the double from ARM immediates. uint32_t lo, hi; @@ -3039,7 +3086,7 @@ bool Assembler::IsMovT(Instr instr) { instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions ((kNumRegisters-1)*B12) | // mask out register EncodeMovwImmediate(0xFFFF)); // mask out immediate value - return instr == 0x34*B20; + return instr == kMovtPattern; } @@ -3047,7 +3094,25 @@ bool Assembler::IsMovW(Instr instr) { instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions ((kNumRegisters-1)*B12) | // mask out destination EncodeMovwImmediate(0xFFFF)); // mask out immediate value - return instr == 0x30*B20; + return instr == kMovwPattern; +} + + +Instr Assembler::GetMovTPattern() { return kMovtPattern; } + + +Instr Assembler::GetMovWPattern() { return kMovwPattern; } + + +Instr Assembler::EncodeMovwImmediate(uint32_t immediate) { + ASSERT(immediate < 0x10000); + return ((immediate & 0xf000) << 4) | (immediate & 0xfff); +} + + +Instr Assembler::PatchMovwImmediate(Instr instruction, uint32_t immediate) { + instruction &= ~EncodeMovwImmediate(0xffff); + return instruction | EncodeMovwImmediate(immediate); } @@ -3217,9 +3282,10 @@ void Assembler::RecordRelocInfo(const RelocInfo& rinfo) { } -void Assembler::ConstantPoolAddEntry(const RelocInfo& rinfo) { +ConstantPoolArray::LayoutSection Assembler::ConstantPoolAddEntry( + const RelocInfo& rinfo) { if (FLAG_enable_ool_constant_pool) { - constant_pool_builder_.AddEntry(this, rinfo); + return constant_pool_builder_.AddEntry(this, rinfo); } else { if (rinfo.rmode() == RelocInfo::NONE64) { ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo); @@ -3237,6 +3303,7 @@ void Assembler::ConstantPoolAddEntry(const RelocInfo& rinfo) { // Make sure the constant pool is not emitted in place of the next // instruction for which we just recorded relocation info. 
BlockConstPoolFor(1); + return ConstantPoolArray::SMALL_SECTION; } } @@ -3488,12 +3555,7 @@ void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) { ConstantPoolBuilder::ConstantPoolBuilder() - : entries_(), - merged_indexes_(), - count_of_64bit_(0), - count_of_code_ptr_(0), - count_of_heap_ptr_(0), - count_of_32bit_(0) { } + : entries_(), current_section_(ConstantPoolArray::SMALL_SECTION) {} bool ConstantPoolBuilder::IsEmpty() { @@ -3501,28 +3563,23 @@ bool ConstantPoolBuilder::IsEmpty() { } -bool ConstantPoolBuilder::Is64BitEntry(RelocInfo::Mode rmode) { - return rmode == RelocInfo::NONE64; -} - - -bool ConstantPoolBuilder::Is32BitEntry(RelocInfo::Mode rmode) { - return !RelocInfo::IsGCRelocMode(rmode) && rmode != RelocInfo::NONE64; -} - - -bool ConstantPoolBuilder::IsCodePtrEntry(RelocInfo::Mode rmode) { - return RelocInfo::IsCodeTarget(rmode); -} - - -bool ConstantPoolBuilder::IsHeapPtrEntry(RelocInfo::Mode rmode) { - return RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode); +ConstantPoolArray::Type ConstantPoolBuilder::GetConstantPoolType( + RelocInfo::Mode rmode) { + if (rmode == RelocInfo::NONE64) { + return ConstantPoolArray::INT64; + } else if (!RelocInfo::IsGCRelocMode(rmode)) { + return ConstantPoolArray::INT32; + } else if (RelocInfo::IsCodeTarget(rmode)) { + return ConstantPoolArray::CODE_PTR; + } else { + ASSERT(RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode)); + return ConstantPoolArray::HEAP_PTR; + } } -void ConstantPoolBuilder::AddEntry(Assembler* assm, - const RelocInfo& rinfo) { +ConstantPoolArray::LayoutSection ConstantPoolBuilder::AddEntry( + Assembler* assm, const RelocInfo& rinfo) { RelocInfo::Mode rmode = rinfo.rmode(); ASSERT(rmode != RelocInfo::COMMENT && rmode != RelocInfo::POSITION && @@ -3532,56 +3589,45 @@ void ConstantPoolBuilder::AddEntry(Assembler* assm, // Try to merge entries which won't be patched. int merged_index = -1; + ConstantPoolArray::LayoutSection entry_section = current_section_; if (RelocInfo::IsNone(rmode) || (!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) { size_t i; - std::vector::const_iterator it; + std::vector::const_iterator it; for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) { - if (RelocInfo::IsEqual(rinfo, *it)) { + if (RelocInfo::IsEqual(rinfo, it->rinfo_)) { + // Merge with found entry. merged_index = i; + entry_section = entries_[i].section_; break; } } } - - entries_.push_back(rinfo); - merged_indexes_.push_back(merged_index); + ASSERT(entry_section <= current_section_); + entries_.push_back(ConstantPoolEntry(rinfo, entry_section, merged_index)); if (merged_index == -1) { // Not merged, so update the appropriate count. - if (Is64BitEntry(rmode)) { - count_of_64bit_++; - } else if (Is32BitEntry(rmode)) { - count_of_32bit_++; - } else if (IsCodePtrEntry(rmode)) { - count_of_code_ptr_++; - } else { - ASSERT(IsHeapPtrEntry(rmode)); - count_of_heap_ptr_++; - } + number_of_entries_[entry_section].increment(GetConstantPoolType(rmode)); } - // Check if we still have room for another entry given Arm's ldr and vldr - // immediate offset range. - // TODO(rmcilroy): Avoid creating a new object here when we support - // extended constant pools. 
- ConstantPoolArray::NumberOfEntries total(count_of_64bit_, - count_of_code_ptr_, - count_of_heap_ptr_, - count_of_32bit_); - ConstantPoolArray::NumberOfEntries int64_counts(count_of_64bit_, 0, 0, 0); - if (!(is_uint12(ConstantPoolArray::SizeFor(total)) && - is_uint10(ConstantPoolArray::SizeFor(int64_counts)))) { - assm->set_constant_pool_full(); + // Check if we still have room for another entry in the small section + // given Arm's ldr and vldr immediate offset range. + if (current_section_ == ConstantPoolArray::SMALL_SECTION && + !(is_uint12(ConstantPoolArray::SizeFor(*small_entries())) && + is_uint10(ConstantPoolArray::MaxInt64Offset( + small_entries()->count_of(ConstantPoolArray::INT64))))) { + current_section_ = ConstantPoolArray::EXTENDED_SECTION; } + return entry_section; } void ConstantPoolBuilder::Relocate(int pc_delta) { - for (std::vector::iterator rinfo = entries_.begin(); - rinfo != entries_.end(); rinfo++) { - ASSERT(rinfo->rmode() != RelocInfo::JS_RETURN); - rinfo->set_pc(rinfo->pc() + pc_delta); + for (std::vector::iterator entry = entries_.begin(); + entry != entries_.end(); entry++) { + ASSERT(entry->rinfo_.rmode() != RelocInfo::JS_RETURN); + entry->rinfo_.set_pc(entry->rinfo_.pc() + pc_delta); } } @@ -3589,89 +3635,98 @@ void ConstantPoolBuilder::Relocate(int pc_delta) { Handle ConstantPoolBuilder::New(Isolate* isolate) { if (IsEmpty()) { return isolate->factory()->empty_constant_pool_array(); + } else if (extended_entries()->is_empty()) { + return isolate->factory()->NewConstantPoolArray(*small_entries()); } else { - ConstantPoolArray::NumberOfEntries small(count_of_64bit_, - count_of_code_ptr_, - count_of_heap_ptr_, - count_of_32bit_); - return isolate->factory()->NewConstantPoolArray(small); + ASSERT(current_section_ == ConstantPoolArray::EXTENDED_SECTION); + return isolate->factory()->NewExtendedConstantPoolArray( + *small_entries(), *extended_entries()); } } void ConstantPoolBuilder::Populate(Assembler* assm, ConstantPoolArray* constant_pool) { - ASSERT(count_of_64bit_ == constant_pool->number_of_entries( - ConstantPoolArray::INT64, ConstantPoolArray::SMALL_SECTION)); - ASSERT(count_of_code_ptr_ == constant_pool->number_of_entries( - ConstantPoolArray::CODE_PTR, ConstantPoolArray::SMALL_SECTION)); - ASSERT(count_of_heap_ptr_ == constant_pool->number_of_entries( - ConstantPoolArray::HEAP_PTR, ConstantPoolArray::SMALL_SECTION)); - ASSERT(count_of_32bit_ == constant_pool->number_of_entries( - ConstantPoolArray::INT32, ConstantPoolArray::SMALL_SECTION)); - ASSERT(entries_.size() == merged_indexes_.size()); - - int index_64bit = 0; - int index_code_ptr = count_of_64bit_; - int index_heap_ptr = count_of_64bit_ + count_of_code_ptr_; - int index_32bit = count_of_64bit_ + count_of_code_ptr_ + count_of_heap_ptr_; - - size_t i; - std::vector::const_iterator rinfo; - for (rinfo = entries_.begin(), i = 0; rinfo != entries_.end(); rinfo++, i++) { - RelocInfo::Mode rmode = rinfo->rmode(); + ASSERT_EQ(extended_entries()->is_empty(), + !constant_pool->is_extended_layout()); + ASSERT(small_entries()->equals(ConstantPoolArray::NumberOfEntries( + constant_pool, ConstantPoolArray::SMALL_SECTION))); + if (constant_pool->is_extended_layout()) { + ASSERT(extended_entries()->equals(ConstantPoolArray::NumberOfEntries( + constant_pool, ConstantPoolArray::EXTENDED_SECTION))); + } + + ConstantPoolArray::NumberOfEntries small_idx; + ConstantPoolArray::NumberOfEntries extended_idx; + for (std::vector::iterator entry = entries_.begin(); + entry != entries_.end(); entry++) { + RelocInfo rinfo = 
entry->rinfo_; + RelocInfo::Mode rmode = entry->rinfo_.rmode(); + ConstantPoolArray::Type type = GetConstantPoolType(rmode); // Update constant pool if necessary and get the entry's offset. int offset; - if (merged_indexes_[i] == -1) { - if (Is64BitEntry(rmode)) { - offset = constant_pool->OffsetOfElementAt(index_64bit) - kHeapObjectTag; - constant_pool->set(index_64bit++, rinfo->data64()); - } else if (Is32BitEntry(rmode)) { - offset = constant_pool->OffsetOfElementAt(index_32bit) - kHeapObjectTag; - constant_pool->set(index_32bit++, static_cast(rinfo->data())); - } else if (IsCodePtrEntry(rmode)) { - offset = constant_pool->OffsetOfElementAt(index_code_ptr) - - kHeapObjectTag; - constant_pool->set(index_code_ptr++, - reinterpret_cast
(rinfo->data())); + if (entry->merged_index_ == -1) { + int index; + if (entry->section_ == ConstantPoolArray::EXTENDED_SECTION) { + index = small_entries()->total_count() + + extended_entries()->base_of(type) + extended_idx.count_of(type); + extended_idx.increment(type); + } else { + ASSERT(entry->section_ == ConstantPoolArray::SMALL_SECTION); + index = small_entries()->base_of(type) + small_idx.count_of(type); + small_idx.increment(type); + } + if (type == ConstantPoolArray::INT64) { + constant_pool->set(index, rinfo.data64()); + } else if (type == ConstantPoolArray::INT32) { + constant_pool->set(index, static_cast(rinfo.data())); + } else if (type == ConstantPoolArray::CODE_PTR) { + constant_pool->set(index, reinterpret_cast
(rinfo.data())); } else { - ASSERT(IsHeapPtrEntry(rmode)); - offset = constant_pool->OffsetOfElementAt(index_heap_ptr) - - kHeapObjectTag; - constant_pool->set(index_heap_ptr++, - reinterpret_cast(rinfo->data())); + ASSERT(type == ConstantPoolArray::HEAP_PTR); + constant_pool->set(index, reinterpret_cast(rinfo.data())); } - merged_indexes_[i] = offset; // Stash offset for merged entries. + offset = constant_pool->OffsetOfElementAt(index) - kHeapObjectTag; + entry->merged_index_ = offset; // Stash offset for merged entries. } else { - size_t merged_index = static_cast(merged_indexes_[i]); - ASSERT(merged_index < merged_indexes_.size() && merged_index < i); - offset = merged_indexes_[merged_index]; + ASSERT(entry->merged_index_ < (entry - entries_.begin())); + offset = entries_[entry->merged_index_].merged_index_; } // Patch vldr/ldr instruction with correct offset. - Instr instr = assm->instr_at(rinfo->pc()); - if (Is64BitEntry(rmode)) { + Instr instr = assm->instr_at(rinfo.pc()); + if (entry->section_ == ConstantPoolArray::EXTENDED_SECTION) { + // Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0]. + Instr next_instr = assm->instr_at(rinfo.pc() + Assembler::kInstrSize); + ASSERT((Assembler::IsMovW(instr) && + Instruction::ImmedMovwMovtValue(instr) == 0)); + ASSERT((Assembler::IsMovT(next_instr) && + Instruction::ImmedMovwMovtValue(next_instr) == 0)); + assm->instr_at_put(rinfo.pc(), + Assembler::PatchMovwImmediate(instr, offset & 0xffff)); + assm->instr_at_put( + rinfo.pc() + Assembler::kInstrSize, + Assembler::PatchMovwImmediate(next_instr, offset >> 16)); + } else if (type == ConstantPoolArray::INT64) { // Instruction to patch must be 'vldr rd, [pp, #0]'. ASSERT((Assembler::IsVldrDPpImmediateOffset(instr) && Assembler::GetVldrDRegisterImmediateOffset(instr) == 0)); ASSERT(is_uint10(offset)); - assm->instr_at_put(rinfo->pc(), - Assembler::SetVldrDRegisterImmediateOffset(instr, offset)); + assm->instr_at_put(rinfo.pc(), Assembler::SetVldrDRegisterImmediateOffset( + instr, offset)); } else { // Instruction to patch must be 'ldr rd, [pp, #0]'. ASSERT((Assembler::IsLdrPpImmediateOffset(instr) && Assembler::GetLdrRegisterImmediateOffset(instr) == 0)); ASSERT(is_uint12(offset)); - assm->instr_at_put(rinfo->pc(), - Assembler::SetLdrRegisterImmediateOffset(instr, offset)); + assm->instr_at_put( + rinfo.pc(), Assembler::SetLdrRegisterImmediateOffset(instr, offset)); } } - ASSERT((index_64bit == count_of_64bit_) && - (index_code_ptr == (index_64bit + count_of_code_ptr_)) && - (index_heap_ptr == (index_code_ptr + count_of_heap_ptr_)) && - (index_32bit == (index_heap_ptr + count_of_32bit_))); + ASSERT(small_idx.equals(*small_entries())); + ASSERT(extended_idx.equals(*extended_entries())); } diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h index 2fe4cf6..e0b89a5 100644 --- a/src/arm/assembler-arm.h +++ b/src/arm/assembler-arm.h @@ -518,13 +518,18 @@ class Operand BASE_EMBEDDED { // Return true if this is a register operand. INLINE(bool is_reg() const); - // Return true if this operand fits in one instruction so that no - // 2-instruction solution with a load into the ip register is necessary. If + // Return the number of actual instructions required to implement the given + // instruction for this particular operand. 
This can be a single instruction, + // if no load into the ip register is necessary, or anything between 2 and 4 + // instructions when we need to load from the constant pool (depending upon + // whether the constant pool entry is in the small or extended section). If // the instruction this operand is used for is a MOV or MVN instruction the // actual instruction to use is required for this calculation. For other // instructions instr is ignored. - bool is_single_instruction(const Assembler* assembler, - Instr instr = 0) const; + // + // The value returned is only valid as long as no entries are added to the + // constant pool between this call and the actual instruction being emitted. + int instructions_required(const Assembler* assembler, Instr instr = 0) const; bool must_output_reloc_info(const Assembler* assembler) const; inline int32_t immediate() const { @@ -645,31 +650,46 @@ class NeonListOperand BASE_EMBEDDED { class ConstantPoolBuilder BASE_EMBEDDED { public: explicit ConstantPoolBuilder(); - void AddEntry(Assembler* assm, const RelocInfo& rinfo); + ConstantPoolArray::LayoutSection AddEntry(Assembler* assm, + const RelocInfo& rinfo); void Relocate(int pc_delta); bool IsEmpty(); Handle New(Isolate* isolate); void Populate(Assembler* assm, ConstantPoolArray* constant_pool); - inline int count_of_64bit() const { return count_of_64bit_; } - inline int count_of_code_ptr() const { return count_of_code_ptr_; } - inline int count_of_heap_ptr() const { return count_of_heap_ptr_; } - inline int count_of_32bit() const { return count_of_32bit_; } + inline ConstantPoolArray::LayoutSection current_section() const { + return current_section_; + } + + inline ConstantPoolArray::NumberOfEntries* number_of_entries( + ConstantPoolArray::LayoutSection section) { + return &number_of_entries_[section]; + } + + inline ConstantPoolArray::NumberOfEntries* small_entries() { + return number_of_entries(ConstantPoolArray::SMALL_SECTION); + } + + inline ConstantPoolArray::NumberOfEntries* extended_entries() { + return number_of_entries(ConstantPoolArray::EXTENDED_SECTION); + } private: - bool Is64BitEntry(RelocInfo::Mode rmode); - bool Is32BitEntry(RelocInfo::Mode rmode); - bool IsCodePtrEntry(RelocInfo::Mode rmode); - bool IsHeapPtrEntry(RelocInfo::Mode rmode); - - // TODO(rmcilroy): This should ideally be a ZoneList, however that would mean - // RelocInfo would need to subclass ZoneObject which it currently doesn't. - std::vector entries_; - std::vector merged_indexes_; - int count_of_64bit_; - int count_of_code_ptr_; - int count_of_heap_ptr_; - int count_of_32bit_; + struct ConstantPoolEntry { + ConstantPoolEntry(RelocInfo rinfo, ConstantPoolArray::LayoutSection section, + int merged_index) + : rinfo_(rinfo), section_(section), merged_index_(merged_index) {} + + RelocInfo rinfo_; + ConstantPoolArray::LayoutSection section_; + int merged_index_; + }; + + ConstantPoolArray::Type GetConstantPoolType(RelocInfo::Mode rmode); + + std::vector entries_; + ConstantPoolArray::LayoutSection current_section_; + ConstantPoolArray::NumberOfEntries number_of_entries_[2]; }; struct VmovIndex { @@ -723,6 +743,10 @@ class Assembler : public AssemblerBase { // Manages the jump elimination optimization if the second parameter is true. int branch_offset(Label* L, bool jump_elimination_allowed); + // Returns true if the given pc address is the start of a constant pool load + // instruction sequence. 
+ INLINE(static bool is_constant_pool_load(Address pc)); + // Return the address in the constant pool of the code target address used by // the branch/call instruction at pc, or the object in a mov. INLINE(static Address constant_pool_entry_address( @@ -1359,6 +1383,9 @@ class Assembler : public AssemblerBase { static bool IsLdrRegisterImmediate(Instr instr); static bool IsVldrDRegisterImmediate(Instr instr); static Instr GetConsantPoolLoadPattern(); + static Instr GetConsantPoolLoadMask(); + static bool IsLdrPpRegOffset(Instr instr); + static Instr GetLdrPpRegOffsetPattern(); static bool IsLdrPpImmediateOffset(Instr instr); static bool IsVldrDPpImmediateOffset(Instr instr); static int GetLdrRegisterImmediateOffset(Instr instr); @@ -1389,7 +1416,11 @@ class Assembler : public AssemblerBase { static int GetCmpImmediateRawImmediate(Instr instr); static bool IsNop(Instr instr, int type = NON_MARKING_NOP); static bool IsMovT(Instr instr); + static Instr GetMovTPattern(); static bool IsMovW(Instr instr); + static Instr GetMovWPattern(); + static Instr EncodeMovwImmediate(uint32_t immediate); + static Instr PatchMovwImmediate(Instr instruction, uint32_t immediate); // Constants in pools are accessed via pc relative addressing, which can // reach +/-4KB for integer PC-relative loads and +/-1KB for floating-point @@ -1414,14 +1445,14 @@ class Assembler : public AssemblerBase { // Generate the constant pool for the generated code. void PopulateConstantPool(ConstantPoolArray* constant_pool); - bool can_use_constant_pool() const { - return is_constant_pool_available() && !constant_pool_full_; - } + bool is_constant_pool_available() const { return constant_pool_available_; } - void set_constant_pool_full() { - constant_pool_full_ = true; + bool use_extended_constant_pool() const { + return constant_pool_builder_.current_section() == + ConstantPoolArray::EXTENDED_SECTION; } + protected: // Relocation for a type-recording IC has the AST id added to it. This // member variable is a way to pass the information from the call site to @@ -1475,10 +1506,6 @@ class Assembler : public AssemblerBase { (pc_offset() < no_const_pool_before_); } - bool is_constant_pool_available() const { - return constant_pool_available_; - } - void set_constant_pool_available(bool available) { constant_pool_available_ = available; } @@ -1548,9 +1575,6 @@ class Assembler : public AssemblerBase { // Indicates whether the constant pool can be accessed, which is only possible // if the pp register points to the current code object's constant pool. bool constant_pool_available_; - // Indicates whether the constant pool is too full to accept new entries due - // to the ldr instruction's limitted immediate offset range. - bool constant_pool_full_; // Code emission inline void CheckBuffer(); @@ -1582,7 +1606,7 @@ class Assembler : public AssemblerBase { // Record reloc info for current pc_ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0); void RecordRelocInfo(const RelocInfo& rinfo); - void ConstantPoolAddEntry(const RelocInfo& rinfo); + ConstantPoolArray::LayoutSection ConstantPoolAddEntry(const RelocInfo& rinfo); friend class RelocInfo; friend class CodePatcher; diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc index fa8f807..fbde24d 100644 --- a/src/arm/code-stubs-arm.cc +++ b/src/arm/code-stubs-arm.cc @@ -1683,7 +1683,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // * function: r1 or at sp. // // An inlined call site may have been generated before calling this stub. 
-// In this case the offset to the inline site to patch is passed in r5. +// In this case the offset to the inline sites to patch are passed in r5 and r6. // (See LCodeGen::DoInstanceOfKnownGlobal) void InstanceofStub::Generate(MacroAssembler* masm) { // Call site inlining and patching implies arguments in registers. @@ -1696,11 +1696,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) { Register map = r3; // Map of the object. const Register function = r1; // Function (rhs). const Register prototype = r4; // Prototype of the function. - const Register inline_site = r9; const Register scratch = r2; - const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize; - Label slow, loop, is_instance, is_not_instance, not_js_object; if (!HasArgsInRegisters()) { @@ -1742,14 +1739,14 @@ void InstanceofStub::Generate(MacroAssembler* masm) { ASSERT(HasArgsInRegisters()); // Patch the (relocated) inlined map check. - // The offset was stored in r5 + // The map_load_offset was stored in r5 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal). - const Register offset = r5; - __ sub(inline_site, lr, offset); + const Register map_load_offset = r5; + __ sub(r9, lr, map_load_offset); // Get the map location in r5 and patch it. - __ GetRelocatedValueLocation(inline_site, offset); - __ ldr(offset, MemOperand(offset)); - __ str(map, FieldMemOperand(offset, Cell::kValueOffset)); + __ GetRelocatedValueLocation(r9, map_load_offset, scratch); + __ ldr(map_load_offset, MemOperand(map_load_offset)); + __ str(map, FieldMemOperand(map_load_offset, Cell::kValueOffset)); } // Register mapping: r3 is object map and r4 is function prototype. @@ -1778,9 +1775,12 @@ void InstanceofStub::Generate(MacroAssembler* masm) { } else { // Patch the call site to return true. __ LoadRoot(r0, Heap::kTrueValueRootIndex); - __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); + // The bool_load_offset was stored in r6 + // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal). + const Register bool_load_offset = r6; + __ sub(r9, lr, bool_load_offset); // Get the boolean result location in scratch and patch it. - __ GetRelocatedValueLocation(inline_site, scratch); + __ GetRelocatedValueLocation(r9, scratch, scratch2); __ str(r0, MemOperand(scratch)); if (!ReturnTrueFalseObject()) { @@ -1796,9 +1796,13 @@ void InstanceofStub::Generate(MacroAssembler* masm) { } else { // Patch the call site to return false. __ LoadRoot(r0, Heap::kFalseValueRootIndex); - __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); + // The bool_load_offset was stored in r6 + // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal). + const Register bool_load_offset = r6; + __ sub(r9, lr, bool_load_offset); + ; // Get the boolean result location in scratch and patch it. 
- __ GetRelocatedValueLocation(inline_site, scratch); + __ GetRelocatedValueLocation(r9, scratch, scratch2); __ str(r0, MemOperand(scratch)); if (!ReturnTrueFalseObject()) { diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h index 6a2d82f..3316bb5 100644 --- a/src/arm/constants-arm.h +++ b/src/arm/constants-arm.h @@ -568,6 +568,7 @@ class Instruction { inline int Immed4Value() const { return Bits(19, 16); } inline int ImmedMovwMovtValue() const { return Immed4Value() << 12 | Offset12Value(); } + DECLARE_STATIC_ACCESSOR(ImmedMovwMovtValue); // Fields used in Load/Store instructions inline int PUValue() const { return Bits(24, 23); } diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc index c5576df..bb6beec 100644 --- a/src/arm/full-codegen-arm.cc +++ b/src/arm/full-codegen-arm.cc @@ -346,13 +346,27 @@ void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) { } +static const int kProfileCounterResetSequenceLength = 5 * Assembler::kInstrSize; + + void FullCodeGenerator::EmitProfilingCounterReset() { + Assembler::BlockConstPoolScope block_const_pool(masm_); + PredictableCodeSizeScope predictable_code_size_scope( + masm_, kProfileCounterResetSequenceLength); + Label start; + __ bind(&start); int reset_value = FLAG_interrupt_budget; if (info_->is_debug()) { // Detect debug break requests as soon as possible. reset_value = FLAG_interrupt_budget >> 4; } __ mov(r2, Operand(profiling_counter_)); + // The mov instruction above can be either 1, 2 or 3 instructions depending + // upon whether it is an extended constant pool - insert nop to compensate. + ASSERT(masm_->InstructionsGeneratedSince(&start) <= 3); + while (masm_->InstructionsGeneratedSince(&start) != 3) { + __ nop(); + } __ mov(r3, Operand(Smi::FromInt(reset_value))); __ str(r3, FieldMemOperand(r2, Cell::kValueOffset)); } @@ -4747,10 +4761,18 @@ static Address GetInterruptImmediateLoadAddress(Address pc) { Address load_address = pc - 2 * Assembler::kInstrSize; if (!FLAG_enable_ool_constant_pool) { ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(load_address))); + } else if (Assembler::IsLdrPpRegOffset(Memory::int32_at(load_address))) { + // This is an extended constant pool lookup. + load_address -= 2 * Assembler::kInstrSize; + ASSERT(Assembler::IsMovW(Memory::int32_at(load_address))); + ASSERT(Assembler::IsMovT( + Memory::int32_at(load_address + Assembler::kInstrSize))); } else if (Assembler::IsMovT(Memory::int32_at(load_address))) { + // This is a movw_movt immediate load. load_address -= Assembler::kInstrSize; ASSERT(Assembler::IsMovW(Memory::int32_at(load_address))); } else { + // This is a small constant pool lookup. ASSERT(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(load_address))); } return load_address; @@ -4770,14 +4792,19 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code, // // bpl ok // ; load interrupt stub address into ip - either of: - // ldr ip, [pc/pp, ] | movw ip, - // | movt ip, + // ; | | + // ldr ip, [pc/pp, #imm] | movw ip, #imm | movw ip, #imm + // | movt ip, #imm> | movw ip, #imm + // | ldr ip, [pp, ip] // blx ip + // // ok-label - // Calculate branch offet to the ok-label - this is the difference between - // the branch address and |pc| (which points at ) plus one instr. 
- int branch_offset = pc + Assembler::kInstrSize - branch_address; + // Calculate branch offset to the ok-label - this is the difference + // between the branch address and |pc| (which points at ) plus + // kProfileCounterResetSequence instructions + int branch_offset = pc - Instruction::kPCReadOffset - branch_address + + kProfileCounterResetSequenceLength; patcher.masm()->b(branch_offset, pl); break; } @@ -4786,9 +4813,12 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code, // // mov r0, r0 (NOP) // ; load on-stack replacement address into ip - either of: - // ldr ip, [pc/pp, ] | movw ip, - // | movt ip, + // ; | | + // ldr ip, [pc/pp, #imm] | movw ip, #imm | movw ip, #imm + // | movt ip, #imm> | movw ip, #imm + // | ldr ip, [pp, ip] // blx ip + // // ok-label patcher.masm()->nop(); break; diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc index 7d76086..01481c3 100644 --- a/src/arm/lithium-codegen-arm.cc +++ b/src/arm/lithium-codegen-arm.cc @@ -2764,13 +2764,17 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { LInstanceOfKnownGlobal* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() V8_OVERRIDE { - codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_); + codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_, + &load_bool_); } virtual LInstruction* instr() V8_OVERRIDE { return instr_; } Label* map_check() { return &map_check_; } + Label* load_bool() { return &load_bool_; } + private: LInstanceOfKnownGlobal* instr_; Label map_check_; + Label load_bool_; }; DeferredInstanceOfKnownGlobal* deferred; @@ -2798,12 +2802,12 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { // We use Factory::the_hole_value() on purpose instead of loading from the // root array to force relocation to be able to later patch with // the cached map. - PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize); Handle cell = factory()->NewCell(factory()->the_hole_value()); __ mov(ip, Operand(Handle(cell))); __ ldr(ip, FieldMemOperand(ip, PropertyCell::kValueOffset)); __ cmp(map, Operand(ip)); __ b(ne, &cache_miss); + __ bind(deferred->load_bool()); // Label for calculating code patching. // We use Factory::the_hole_value() on purpose instead of loading from the // root array to force relocation to be able to later patch // with true or false. @@ -2837,7 +2841,8 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, - Label* map_check) { + Label* map_check, + Label* bool_load) { InstanceofStub::Flags flags = InstanceofStub::kNoFlags; flags = static_cast( flags | InstanceofStub::kArgsInRegisters); @@ -2851,21 +2856,35 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, LoadContextFromDeferred(instr->context()); __ Move(InstanceofStub::right(), instr->function()); - static const int kAdditionalDelta = 4; + + int call_size = CallCodeSize(stub.GetCode(), RelocInfo::CODE_TARGET); + int additional_delta = (call_size / Assembler::kInstrSize) + 4; // Make sure that code size is predicable, since we use specific constants // offsets in the code to find embedded values.. 
- PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize); - int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta; - Label before_push_delta; - __ bind(&before_push_delta); - __ BlockConstPoolFor(kAdditionalDelta); - // r5 is used to communicate the offset to the location of the map check. - __ mov(r5, Operand(delta * kPointerSize)); - // The mov above can generate one or two instructions. The delta was computed - // for two instructions, so we need to pad here in case of one instruction. - if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) { - ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta)); - __ nop(); + PredictableCodeSizeScope predictable( + masm_, (additional_delta + 1) * Assembler::kInstrSize); + // Make sure we don't emit any additional entries in the constant pool before + // the call to ensure that the CallCodeSize() calculated the correct number of + // instructions for the constant pool load. + { + ConstantPoolUnavailableScope constant_pool_unavailable(masm_); + int map_check_delta = + masm_->InstructionsGeneratedSince(map_check) + additional_delta; + int bool_load_delta = + masm_->InstructionsGeneratedSince(bool_load) + additional_delta; + Label before_push_delta; + __ bind(&before_push_delta); + __ BlockConstPoolFor(additional_delta); + // r5 is used to communicate the offset to the location of the map check. + __ mov(r5, Operand(map_check_delta * kPointerSize)); + // r6 is used to communicate the offset to the location of the bool load. + __ mov(r6, Operand(bool_load_delta * kPointerSize)); + // The mov above can generate one or two instructions. The delta was + // computed for two instructions, so we need to pad here in case of one + // instruction. + while (masm_->InstructionsGeneratedSince(&before_push_delta) != 4) { + __ nop(); + } } CallCodeGeneric(stub.GetCode(), RelocInfo::CODE_TARGET, @@ -3928,7 +3947,13 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { ASSERT(instr->target()->IsRegister()); Register target = ToRegister(instr->target()); generator.BeforeCall(__ CallSize(target)); - __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag)); + // Make sure we don't emit any additional entries in the constant pool + // before the call to ensure that the CallCodeSize() calculated the correct + // number of instructions for the constant pool load. 
+ { + ConstantPoolUnavailableScope constant_pool_unavailable(masm_); + __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag)); + } __ Call(target); } generator.AfterCall(); diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h index b20b3f2..8daf590 100644 --- a/src/arm/lithium-codegen-arm.h +++ b/src/arm/lithium-codegen-arm.h @@ -116,7 +116,7 @@ class LCodeGen: public LCodeGenBase { void DoDeferredStringCharFromCode(LStringCharFromCode* instr); void DoDeferredAllocate(LAllocate* instr); void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, - Label* map_check); + Label* map_check, Label* bool_load); void DoDeferredInstanceMigration(LCheckMaps* instr, Register object); void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, Register result, diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc index 0d7a1d8..57d5542 100644 --- a/src/arm/macro-assembler-arm.cc +++ b/src/arm/macro-assembler-arm.cc @@ -74,13 +74,10 @@ void MacroAssembler::Call(Register target, Condition cond) { int MacroAssembler::CallSize( Address target, RelocInfo::Mode rmode, Condition cond) { - int size = 2 * kInstrSize; Instr mov_instr = cond | MOV | LeaveCC; - intptr_t immediate = reinterpret_cast(target); - if (!Operand(immediate, rmode).is_single_instruction(this, mov_instr)) { - size += kInstrSize; - } - return size; + Operand mov_operand = Operand(reinterpret_cast(target), rmode); + return kInstrSize + + mov_operand.instructions_required(this, mov_instr) * kInstrSize; } @@ -94,13 +91,10 @@ int MacroAssembler::CallSizeNotPredictableCodeSize(Isolate* isolate, Address target, RelocInfo::Mode rmode, Condition cond) { - int size = 2 * kInstrSize; Instr mov_instr = cond | MOV | LeaveCC; - intptr_t immediate = reinterpret_cast(target); - if (!Operand(immediate, rmode).is_single_instruction(NULL, mov_instr)) { - size += kInstrSize; - } - return size; + Operand mov_operand = Operand(reinterpret_cast(target), rmode); + return kInstrSize + + mov_operand.instructions_required(NULL, mov_instr) * kInstrSize; } @@ -273,7 +267,7 @@ void MacroAssembler::And(Register dst, Register src1, const Operand& src2, !src2.must_output_reloc_info(this) && src2.immediate() == 0) { mov(dst, Operand::Zero(), LeaveCC, cond); - } else if (!src2.is_single_instruction(this) && + } else if (!(src2.instructions_required(this) == 1) && !src2.must_output_reloc_info(this) && CpuFeatures::IsSupported(ARMv7) && IsPowerOf2(src2.immediate() + 1)) { @@ -1851,7 +1845,7 @@ void MacroAssembler::Allocate(int object_size, object_size -= bits; shift += 8; Operand bits_operand(bits); - ASSERT(bits_operand.is_single_instruction(this)); + ASSERT(bits_operand.instructions_required(this) == 1); add(scratch2, source, bits_operand, SetCC, cond); source = scratch2; cond = cc; @@ -3630,9 +3624,50 @@ void MacroAssembler::CallCFunctionHelper(Register function, void MacroAssembler::GetRelocatedValueLocation(Register ldr_location, - Register result) { - const uint32_t kLdrOffsetMask = (1 << 12) - 1; + Register result, + Register scratch) { + Label small_constant_pool_load, load_result; ldr(result, MemOperand(ldr_location)); + + if (FLAG_enable_ool_constant_pool) { + // Check if this is an extended constant pool load. 
+ and_(scratch, result, Operand(GetConsantPoolLoadMask())); + teq(scratch, Operand(GetConsantPoolLoadPattern())); + b(eq, &small_constant_pool_load); + if (emit_debug_code()) { + // Check that the instruction sequence is: + // movw reg, #offset_low + // movt reg, #offset_high + // ldr reg, [pp, reg] + Instr patterns[] = {GetMovWPattern(), GetMovTPattern(), + GetLdrPpRegOffsetPattern()}; + for (int i = 0; i < 3; i++) { + ldr(result, MemOperand(ldr_location, i * kInstrSize)); + and_(result, result, Operand(patterns[i])); + cmp(result, Operand(patterns[i])); + Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool); + } + // Result was clobbered. Restore it. + ldr(result, MemOperand(ldr_location)); + } + + // Get the offset into the constant pool. First extract movw immediate into + // result. + and_(scratch, result, Operand(0xfff)); + mov(ip, Operand(result, LSR, 4)); + and_(ip, ip, Operand(0xf000)); + orr(result, scratch, Operand(ip)); + // Then extract movt immediate and or into result. + ldr(scratch, MemOperand(ldr_location, kInstrSize)); + and_(ip, scratch, Operand(0xf0000)); + orr(result, result, Operand(ip, LSL, 12)); + and_(scratch, scratch, Operand(0xfff)); + orr(result, result, Operand(scratch, LSL, 16)); + + b(&load_result); + } + + bind(&small_constant_pool_load); if (emit_debug_code()) { // Check that the instruction is a ldr reg, [ + offset] . and_(result, result, Operand(GetConsantPoolLoadPattern())); @@ -3641,8 +3676,13 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location, // Result was clobbered. Restore it. ldr(result, MemOperand(ldr_location)); } - // Get the address of the constant. + + // Get the offset into the constant pool. + const uint32_t kLdrOffsetMask = (1 << 12) - 1; and_(result, result, Operand(kLdrOffsetMask)); + + bind(&load_result); + // Get the address of the constant. if (FLAG_enable_ool_constant_pool) { add(result, pp, Operand(result)); } else { diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h index e30096a..80ed85c 100644 --- a/src/arm/macro-assembler-arm.h +++ b/src/arm/macro-assembler-arm.h @@ -76,12 +76,11 @@ class MacroAssembler: public Assembler { // macro assembler. MacroAssembler(Isolate* isolate, void* buffer, int size); - // Jump, Call, and Ret pseudo instructions implementing inter-working. - void Jump(Register target, Condition cond = al); - void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al); - void Jump(Handle code, RelocInfo::Mode rmode, Condition cond = al); + + // Returns the size of a call in instructions. Note, the value returned is + // only valid as long as no entries are added to the constant pool between + // checking the call size and emitting the actual call. static int CallSize(Register target, Condition cond = al); - void Call(Register target, Condition cond = al); int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al); int CallStubSize(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None(), @@ -90,6 +89,12 @@ class MacroAssembler: public Assembler { Address target, RelocInfo::Mode rmode, Condition cond = al); + + // Jump, Call, and Ret pseudo instructions implementing inter-working. 
+ void Jump(Register target, Condition cond = al); + void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al); + void Jump(Handle code, RelocInfo::Mode rmode, Condition cond = al); + void Call(Register target, Condition cond = al); void Call(Address target, RelocInfo::Mode rmode, Condition cond = al, TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS); @@ -1354,8 +1359,8 @@ class MacroAssembler: public Assembler { // Get the location of a relocated constant (its address in the constant pool) // from its load site. - void GetRelocatedValueLocation(Register ldr_location, - Register result); + void GetRelocatedValueLocation(Register ldr_location, Register result, + Register scratch); void ClampUint8(Register output_reg, Register input_reg); diff --git a/src/debug.cc b/src/debug.cc index ca32957..0a02adc 100644 --- a/src/debug.cc +++ b/src/debug.cc @@ -2360,24 +2360,27 @@ void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) { // Continue just after the slot. after_break_target_ = addr + Assembler::kDebugBreakSlotLength; - } else if (IsDebugBreak(Assembler::target_address_at(addr, *code))) { - // We now know that there is still a debug break call at the target address, - // so the break point is still there and the original code will hold the - // address to jump to in order to complete the call which is replaced by a - // call to DebugBreakXXX. - - // Find the corresponding address in the original code. - addr += original_code->instruction_start() - code->instruction_start(); - - // Install jump to the call address in the original code. This will be the - // call which was overwritten by the call to DebugBreakXXX. - after_break_target_ = Assembler::target_address_at(addr, *original_code); } else { - // There is no longer a break point present. Don't try to look in the - // original code as the running code will have the right address. This takes - // care of the case where the last break point is removed from the function - // and therefore no "original code" is available. - after_break_target_ = Assembler::target_address_at(addr, *code); + addr = Assembler::target_address_from_return_address(frame->pc()); + if (IsDebugBreak(Assembler::target_address_at(addr, *code))) { + // We now know that there is still a debug break call at the target + // address, so the break point is still there and the original code will + // hold the address to jump to in order to complete the call which is + // replaced by a call to DebugBreakXXX. + + // Find the corresponding address in the original code. + addr += original_code->instruction_start() - code->instruction_start(); + + // Install jump to the call address in the original code. This will be the + // call which was overwritten by the call to DebugBreakXXX. + after_break_target_ = Assembler::target_address_at(addr, *original_code); + } else { + // There is no longer a break point present. Don't try to look in the + // original code as the running code will have the right address. This + // takes care of the case where the last break point is removed from the + // function and therefore no "original code" is available. 
+ after_break_target_ = Assembler::target_address_at(addr, *code); + } } } diff --git a/src/objects-inl.h b/src/objects-inl.h index abbbe91..b90656f 100644 --- a/src/objects-inl.h +++ b/src/objects-inl.h @@ -2269,6 +2269,84 @@ void FixedDoubleArray::FillWithHoles(int from, int to) { } +void ConstantPoolArray::NumberOfEntries::increment(Type type) { + ASSERT(type < NUMBER_OF_TYPES); + element_counts_[type]++; +} + + +int ConstantPoolArray::NumberOfEntries::equals( + const ConstantPoolArray::NumberOfEntries& other) const { + for (int i = 0; i < NUMBER_OF_TYPES; i++) { + if (element_counts_[i] != other.element_counts_[i]) return false; + } + return true; +} + + +bool ConstantPoolArray::NumberOfEntries::is_empty() const { + return total_count() == 0; +} + + +int ConstantPoolArray::NumberOfEntries::count_of(Type type) const { + ASSERT(type < NUMBER_OF_TYPES); + return element_counts_[type]; +} + + +int ConstantPoolArray::NumberOfEntries::base_of(Type type) const { + int base = 0; + ASSERT(type < NUMBER_OF_TYPES); + for (int i = 0; i < type; i++) { + base += element_counts_[i]; + } + return base; +} + + +int ConstantPoolArray::NumberOfEntries::total_count() const { + int count = 0; + for (int i = 0; i < NUMBER_OF_TYPES; i++) { + count += element_counts_[i]; + } + return count; +} + + +int ConstantPoolArray::NumberOfEntries::are_in_range(int min, int max) const { + for (int i = FIRST_TYPE; i < NUMBER_OF_TYPES; i++) { + if (element_counts_[i] < min || element_counts_[i] > max) { + return false; + } + } + return true; +} + + +int ConstantPoolArray::Iterator::next_index() { + ASSERT(!is_finished()); + int ret = next_index_++; + update_section(); + return ret; +} + + +bool ConstantPoolArray::Iterator::is_finished() { + return next_index_ > array_->last_index(type_, final_section_); +} + + +void ConstantPoolArray::Iterator::update_section() { + if (next_index_ > array_->last_index(type_, current_section_) && + current_section_ != final_section_) { + ASSERT(final_section_ == EXTENDED_SECTION); + current_section_ = EXTENDED_SECTION; + next_index_ = array_->first_index(type_, EXTENDED_SECTION); + } +} + + bool ConstantPoolArray::is_extended_layout() { uint32_t small_layout_1 = READ_UINT32_FIELD(this, kSmallLayout1Offset); return IsExtendedField::decode(small_layout_1); @@ -2524,29 +2602,6 @@ int ConstantPoolArray::length() { } -int ConstantPoolArray::Iterator::next_index() { - ASSERT(!is_finished()); - int ret = next_index_++; - update_section(); - return ret; -} - - -bool ConstantPoolArray::Iterator::is_finished() { - return next_index_ > array_->last_index(type_, final_section_); -} - - -void ConstantPoolArray::Iterator::update_section() { - if (next_index_ > array_->last_index(type_, current_section_) && - current_section_ != final_section_) { - ASSERT(final_section_ == EXTENDED_SECTION); - current_section_ = EXTENDED_SECTION; - next_index_ = array_->first_index(type_, EXTENDED_SECTION); - } -} - - WriteBarrierMode HeapObject::GetWriteBarrierMode( const DisallowHeapAllocation& promise) { Heap* heap = GetHeap(); diff --git a/src/objects.cc b/src/objects.cc index 41a7771..2672aac 100644 --- a/src/objects.cc +++ b/src/objects.cc @@ -9710,15 +9710,23 @@ bool Map::EquivalentToForNormalization(Map* other, void ConstantPoolArray::ConstantPoolIterateBody(ObjectVisitor* v) { - ConstantPoolArray::Iterator code_iter(this, ConstantPoolArray::CODE_PTR); - while (!code_iter.is_finished()) { - v->VisitCodeEntry(reinterpret_cast
( - RawFieldOfElementAt(code_iter.next_index()))); - } - - ConstantPoolArray::Iterator heap_iter(this, ConstantPoolArray::HEAP_PTR); - while (!heap_iter.is_finished()) { - v->VisitPointer(RawFieldOfElementAt(heap_iter.next_index())); + // Unfortunately the serializer relies on pointers within an object being + // visited in-order, so we have to iterate both the code and heap pointers in + // the small section before doing so in the extended section. + for (int s = 0; s <= final_section(); ++s) { + LayoutSection section = static_cast(s); + ConstantPoolArray::Iterator code_iter(this, ConstantPoolArray::CODE_PTR, + section); + while (!code_iter.is_finished()) { + v->VisitCodeEntry(reinterpret_cast
( + RawFieldOfElementAt(code_iter.next_index()))); + } + + ConstantPoolArray::Iterator heap_iter(this, ConstantPoolArray::HEAP_PTR, + section); + while (!heap_iter.is_finished()) { + v->VisitPointer(RawFieldOfElementAt(heap_iter.next_index())); + } } } diff --git a/src/objects.h b/src/objects.h index 2e78413..6e996a9 100644 --- a/src/objects.h +++ b/src/objects.h @@ -3135,6 +3135,12 @@ class ConstantPoolArray: public HeapObject { class NumberOfEntries BASE_EMBEDDED { public: + inline NumberOfEntries() { + for (int i = 0; i < NUMBER_OF_TYPES; i++) { + element_counts_[i] = 0; + } + } + inline NumberOfEntries(int int64_count, int code_ptr_count, int heap_ptr_count, int int32_count) { element_counts_[INT64] = int64_count; @@ -3150,27 +3156,13 @@ class ConstantPoolArray: public HeapObject { element_counts_[INT32] = array->number_of_entries(INT32, section); } - inline int count_of(Type type) const { - ASSERT(type < NUMBER_OF_TYPES); - return element_counts_[type]; - } - - inline int total_count() const { - int count = 0; - for (int i = 0; i < NUMBER_OF_TYPES; i++) { - count += element_counts_[i]; - } - return count; - } - - inline int are_in_range(int min, int max) const { - for (int i = FIRST_TYPE; i < NUMBER_OF_TYPES; i++) { - if (element_counts_[i] < min || element_counts_[i] > max) { - return false; - } - } - return true; - } + inline void increment(Type type); + inline int equals(const NumberOfEntries& other) const; + inline bool is_empty() const; + inline int count_of(Type type) const; + inline int base_of(Type type) const; + inline int total_count() const; + inline int are_in_range(int min, int max) const; private: int element_counts_[NUMBER_OF_TYPES]; @@ -3179,14 +3171,26 @@ class ConstantPoolArray: public HeapObject { class Iterator BASE_EMBEDDED { public: inline Iterator(ConstantPoolArray* array, Type type) - : array_(array), type_(type), final_section_(array->final_section()) { - current_section_ = SMALL_SECTION; - next_index_ = array->first_index(type, SMALL_SECTION); + : array_(array), + type_(type), + final_section_(array->final_section()), + current_section_(SMALL_SECTION), + next_index_(array->first_index(type, SMALL_SECTION)) { + update_section(); + } + + inline Iterator(ConstantPoolArray* array, Type type, LayoutSection section) + : array_(array), + type_(type), + final_section_(section), + current_section_(section), + next_index_(array->first_index(type, section)) { update_section(); } inline int next_index(); inline bool is_finished(); + private: inline void update_section(); ConstantPoolArray* array_; @@ -3246,6 +3250,11 @@ class ConstantPoolArray: public HeapObject { // Garbage collection support. inline int size(); + + inline static int MaxInt64Offset(int number_of_int64) { + return kFirstEntryOffset + (number_of_int64 * kInt64Size); + } + inline static int SizeFor(const NumberOfEntries& small) { int size = kFirstEntryOffset + (small.count_of(INT64) * kInt64Size) + diff --git a/src/spaces.cc b/src/spaces.cc index fa78f75..80cb4da 100644 --- a/src/spaces.cc +++ b/src/spaces.cc @@ -1047,7 +1047,7 @@ intptr_t PagedSpace::SizeOfFirstPage() { int size = 0; switch (identity()) { case OLD_POINTER_SPACE: - size = 96 * kPointerSize * KB; + size = 112 * kPointerSize * KB; break; case OLD_DATA_SPACE: size = 192 * KB; -- 2.7.4
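The following standalone C++ sketch is not part of the patch. It only illustrates how an extended-section constant pool offset is carried by the movw/movt pair used by the new load sequence (movw ip, #lo; movt ip, #hi; ldr rd, [pp, ip]). EncodeMovwImmediate and PatchMovwImmediate mirror the helpers added to assembler-arm.cc above; the example offset, the placeholder instruction encodings, and main() are illustrative assumptions only.

// Hypothetical, self-contained illustration; not the patch's code.
#include <cassert>
#include <cstdint>
#include <cstdio>

typedef uint32_t Instr;

// A movw/movt immediate is 16 bits wide, stored as imm4:imm12
// (instruction bits 19:16 and 11:0), as in Assembler::EncodeMovwImmediate.
static Instr EncodeMovwImmediate(uint32_t immediate) {
  assert(immediate < 0x10000);
  return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
}

// Replace the immediate field of an existing movw/movt instruction,
// as in Assembler::PatchMovwImmediate.
static Instr PatchMovwImmediate(Instr instruction, uint32_t immediate) {
  instruction &= ~EncodeMovwImmediate(0xffff);
  return instruction | EncodeMovwImmediate(immediate);
}

int main() {
  // Example offset of an entry in the extended section of the constant pool.
  uint32_t offset = 0x12344;
  // Placeholder instructions as emitted before patching: movw ip, #0 and
  // movt ip, #0 (AL condition, Rd = ip = r12); encodings assumed here.
  Instr movw = 0xe300c000;
  Instr movt = 0xe340c000;
  // Split the 32-bit offset across the two 16-bit immediates, as
  // ConstantPoolBuilder::Populate does for extended-section entries.
  movw = PatchMovwImmediate(movw, offset & 0xffff);
  movt = PatchMovwImmediate(movt, offset >> 16);
  std::printf("movw: 0x%08x  movt: 0x%08x\n", movw, movt);
  // The patched pair materializes `offset` in ip, after which
  // ldr rd, [pp, ip] reads the constant pool entry at pp + offset.
  return 0;
}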