// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
-bool Operand::must_output_reloc_info(const Assembler* assembler) const {
+bool Operand::must_output_reloc_info(Isolate* isolate,
+ const Assembler* assembler) const {
if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
if (assembler != NULL && assembler->predictable_code_size()) return true;
- return Serializer::enabled();
+ return Serializer::enabled(isolate);
} else if (RelocInfo::IsNone(rmode_)) {
return false;
}
}
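A minimal sketch, for illustration only, of the encodability constraint the comment above refers to (helper name assumed): an ARM ldr/str immediate offset carries just 12 bits, so an offset that encodes at assembly time may stop encoding once the serializer relocates the referenced value, which is why relocatable operands are routed through a register.

#include <stdint.h>

static bool FitsArmOffset12(uint32_t byte_offset) {
  // Unsigned 12-bit offset field of ARM ldr/str: 0..4095.
  return byte_offset < (1u << 12);
}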
-static bool use_mov_immediate_load(const Operand& x,
+static bool use_mov_immediate_load(Isolate* isolate,
+ const Operand& x,
const Assembler* assembler) {
if (assembler != NULL && !assembler->can_use_constant_pool()) {
// If there is no constant pool available, we must use a mov immediate.
(assembler == NULL || !assembler->predictable_code_size())) {
// Prefer movw / movt to constant pool if it is more efficient on the CPU.
return true;
- } else if (x.must_output_reloc_info(assembler)) {
+ } else if (x.must_output_reloc_info(isolate, assembler)) {
// Prefer constant pool if data is likely to be patched.
return false;
} else {
}
-bool Operand::is_single_instruction(const Assembler* assembler,
+bool Operand::is_single_instruction(Isolate* isolate,
+ const Assembler* assembler,
Instr instr) const {
if (rm_.is_valid()) return true;
uint32_t dummy1, dummy2;
- if (must_output_reloc_info(assembler) ||
+ if (must_output_reloc_info(isolate, assembler) ||
!fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
// The immediate operand cannot be encoded as a shifter operand, or use of
// constant pool is required. For a mov instruction not setting the
// condition code additional instruction conventions can be used.
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- return !use_mov_immediate_load(*this, assembler);
+ return !use_mov_immediate_load(isolate, *this, assembler);
} else {
// If this is not a mov or mvn instruction there will always be an additional
// instruction - either mov or ldr. The mov might actually be two
const Operand& x,
Condition cond) {
RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL);
- if (x.must_output_reloc_info(this)) {
+ if (x.must_output_reloc_info(isolate(), this)) {
RecordRelocInfo(rinfo);
}
- if (use_mov_immediate_load(x, this)) {
+ if (use_mov_immediate_load(isolate(), x, this)) {
Register target = rd.code() == pc.code() ? ip : rd;
// TODO(rmcilroy): add ARMv6 support for immediate loads.
ASSERT(CpuFeatures::IsSupported(ARMv7));
- if (!FLAG_enable_ool_constant_pool && x.must_output_reloc_info(this)) {
+ if (!FLAG_enable_ool_constant_pool &&
+ x.must_output_reloc_info(isolate(), this)) {
// Make sure the movw/movt doesn't get separated.
BlockConstPoolFor(2);
}
// Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
- if (x.must_output_reloc_info(this) ||
+ if (x.must_output_reloc_info(isolate(), this) ||
!fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
// The immediate operand cannot be encoded as a shifter operand, so load
// it first to register ip and change the original instruction to use ip.
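For reference, a standalone sketch (assumed helper name, not V8's fits_shifter, which also tries rewriting to alternative instructions) of the core test behind "cannot be encoded as a shifter operand": an ARM data-processing immediate must be an 8-bit value rotated right by an even amount.

#include <stdint.h>

static bool EncodableAsShifterImmediate(uint32_t imm) {
  if (imm <= 0xFF) return true;                      // rotation of 0
  for (int rot = 2; rot < 32; rot += 2) {
    // Undo a rotate-right-by-rot encoding by rotating left.
    uint32_t v = (imm << rot) | (imm >> (32 - rot));
    if (v <= 0xFF) return true;
  }
  return false;
}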
// Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
- if (src.must_output_reloc_info(this) ||
+ if (src.must_output_reloc_info(isolate(), this) ||
!fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
// Immediate operand cannot be encoded, load it first to register ip.
move_32_bit_immediate(ip, src);
if (!RelocInfo::IsNone(rinfo.rmode())) {
// Don't record external references unless the heap will be serialized.
if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE) {
- if (!Serializer::enabled() && !emit_debug_code()) {
+ if (!Serializer::enabled(isolate()) && !emit_debug_code()) {
return;
}
}
// data
bool found = false;
- if (!Serializer::enabled() && (rinfo.rmode() >= RelocInfo::CELL)) {
+ if (!Serializer::enabled(isolate()) &&
+ (rinfo.rmode() >= RelocInfo::CELL)) {
for (int j = 0; j < i; j++) {
RelocInfo& rinfo2 = pending_32_bit_reloc_info_[j];
// Try to merge entries which won't be patched.
int merged_index = -1;
if (RelocInfo::IsNone(rmode) ||
- (!Serializer::enabled() && (rmode >= RelocInfo::CELL))) {
+ (!Serializer::enabled(assm->isolate()) && (rmode >= RelocInfo::CELL))) {
size_t i;
std::vector<RelocInfo>::const_iterator it;
for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
return Check(f, found_by_runtime_probing_only_);
}
- static bool IsSafeForSnapshot(CpuFeature f) {
+ static bool IsSafeForSnapshot(Isolate* isolate, CpuFeature f) {
return Check(f, cross_compile_) ||
(IsSupported(f) &&
- (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
+ (!Serializer::enabled(isolate) || !IsFoundByRuntimeProbingOnly(f)));
}
static unsigned cache_line_size() { return cache_line_size_; }
// the instruction this operand is used for is a MOV or MVN instruction the
// actual instruction to use is required for this calculation. For other
// instructions instr is ignored.
- bool is_single_instruction(const Assembler* assembler, Instr instr = 0) const;
- bool must_output_reloc_info(const Assembler* assembler) const;
+ bool is_single_instruction(Isolate* isolate,
+ const Assembler* assembler,
+ Instr instr = 0) const;
+ bool must_output_reloc_info(Isolate* isolate,
+ const Assembler* assembler) const;
inline int32_t immediate() const {
ASSERT(!rm_.is_valid());
#if defined(V8_HOST_ARCH_ARM)
OS::MemCopyUint8Function CreateMemCopyUint8Function(
- OS::MemCopyUint8Function stub) {
+ bool serializer_enabled,
+ OS::MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
- if (Serializer::enabled() || !CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
+ if (serializer_enabled || !CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
return stub;
}
size_t actual_size;
// We need calls to have a predictable size in the unoptimized code, but
// this is optimized code, so we don't have to have a predictable size.
int call_size_in_bytes =
- MacroAssembler::CallSizeNotPredictableCodeSize(deopt_entry,
+ MacroAssembler::CallSizeNotPredictableCodeSize(isolate,
+ deopt_entry,
RelocInfo::NONE32);
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
: ObjectLiteral::kNoFlags;
__ mov(r0, Operand(Smi::FromInt(flags)));
int properties_count = constant_properties->length() / 2;
- if (expr->may_store_doubles() || expr->depth() > 1 || Serializer::enabled() ||
- flags != ObjectLiteral::kFastElements ||
+ if (expr->may_store_doubles() || expr->depth() > 1 ||
+ Serializer::enabled(isolate()) || flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ Push(r3, r2, r1, r0);
__ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
__ CallStub(&stub);
__ IncrementCounter(
isolate()->counters()->cow_arrays_created_stub(), 1, r1, r2);
- } else if (expr->depth() > 1 || Serializer::enabled() ||
+ } else if (expr->depth() > 1 || Serializer::enabled(isolate()) ||
length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ mov(r0, Operand(Smi::FromInt(flags)));
__ Push(r3, r2, r1, r0);
// it was just a plain use), so it is free to move the split child into
// the same register that is used for the use-at-start.
// See https://code.google.com/p/chromium/issues/detail?id=201590
- if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) {
+ if (!(instr->ClobbersRegisters() &&
+ instr->ClobbersDoubleRegisters(isolate()))) {
int fixed = 0;
int used_at_start = 0;
for (UseIterator it(instr); !it.Done(); it.Advance()) {
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
- virtual bool ClobbersDoubleRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+ return IsCall();
+ }
// Interface to the register allocator and iterators.
bool IsMarkedAsCall() const { return IsCall(); }
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
- virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
return save_doubles() == kDontSaveFPRegs;
}
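The added V8_OVERRIDE annotations matter because the virtual's signature is changing; a minimal standalone sketch (hypothetical class names) of the failure mode they guard against:

class Isolate;

struct InstructionBase {
  virtual ~InstructionBase() {}
  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const { return false; }
};

struct CallLikeInstruction : InstructionBase {
  // Without the override keyword (V8_OVERRIDE here), a copy left with the old
  // parameterless signature would silently become an unrelated overload
  // instead of an override; with it, the mismatch is a compile error.
  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const override {
    return true;
  }
};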
next_block_(NULL),
allocator_(allocator) { }
+ Isolate* isolate() const { return graph_->isolate(); }
+
// Build the sequence for the graph.
LPlatformChunk* Build();
int size = 2 * kInstrSize;
Instr mov_instr = cond | MOV | LeaveCC;
intptr_t immediate = reinterpret_cast<intptr_t>(target);
- if (!Operand(immediate, rmode).is_single_instruction(this, mov_instr)) {
+ if (!Operand(immediate, rmode).is_single_instruction(isolate(),
+ this,
+ mov_instr)) {
size += kInstrSize;
}
return size;
}
-int MacroAssembler::CallSizeNotPredictableCodeSize(
- Address target, RelocInfo::Mode rmode, Condition cond) {
+int MacroAssembler::CallSizeNotPredictableCodeSize(Isolate* isolate,
+ Address target,
+ RelocInfo::Mode rmode,
+ Condition cond) {
int size = 2 * kInstrSize;
Instr mov_instr = cond | MOV | LeaveCC;
intptr_t immediate = reinterpret_cast<intptr_t>(target);
- if (!Operand(immediate, rmode).is_single_instruction(NULL, mov_instr)) {
+ if (!Operand(immediate, rmode).is_single_instruction(isolate,
+ NULL,
+ mov_instr)) {
size += kInstrSize;
}
return size;
void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
Condition cond) {
if (!src2.is_reg() &&
- !src2.must_output_reloc_info(this) &&
+ !src2.must_output_reloc_info(isolate(), this) &&
src2.immediate() == 0) {
mov(dst, Operand::Zero(), LeaveCC, cond);
- } else if (!src2.is_single_instruction(this) &&
- !src2.must_output_reloc_info(this) &&
+ } else if (!src2.is_single_instruction(isolate(), this) &&
+ !src2.must_output_reloc_info(isolate(), this) &&
CpuFeatures::IsSupported(ARMv7) &&
IsPowerOf2(src2.immediate() + 1)) {
ubfx(dst, src1, 0,
void MacroAssembler::PushSafepointRegistersAndDoubles() {
// Number of d-regs not known at snapshot time.
- ASSERT(!Serializer::enabled());
+ ASSERT(!Serializer::enabled(isolate()));
PushSafepointRegisters();
// Only save allocatable registers.
ASSERT(kScratchDoubleReg.is(d15) && kDoubleRegZero.is(d14));
void MacroAssembler::PopSafepointRegistersAndDoubles() {
// Number of d-regs not known at snapshot time.
- ASSERT(!Serializer::enabled());
+ ASSERT(!Serializer::enabled(isolate()));
// Only save allocatable registers.
ASSERT(kScratchDoubleReg.is(d15) && kDoubleRegZero.is(d14));
ASSERT(DwVfpRegister::NumReservedRegisters() == 2);
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
// Number of d-regs not known at snapshot time.
- ASSERT(!Serializer::enabled());
+ ASSERT(!Serializer::enabled(isolate()));
// General purpose registers are pushed last on the stack.
int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
object_size -= bits;
shift += 8;
Operand bits_operand(bits);
- ASSERT(bits_operand.is_single_instruction(this));
+ ASSERT(bits_operand.is_single_instruction(isolate(), this));
add(scratch2, source, bits_operand, SetCC, cond);
source = scratch2;
cond = cc;
int CallStubSize(CodeStub* stub,
TypeFeedbackId ast_id = TypeFeedbackId::None(),
Condition cond = al);
- static int CallSizeNotPredictableCodeSize(Address target,
+ static int CallSizeNotPredictableCodeSize(Isolate* isolate,
+ Address target,
RelocInfo::Mode rmode,
Condition cond = al);
void Call(Address target, RelocInfo::Mode rmode,
}
-bool Operand::NeedsRelocation() const {
+bool Operand::NeedsRelocation(Isolate* isolate) const {
if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
- return Serializer::enabled();
+ return Serializer::enabled(isolate);
}
return !RelocInfo::IsNone(rmode_);
FlagsUpdate S,
AddSubOp op) {
ASSERT(rd.SizeInBits() == rn.SizeInBits());
- ASSERT(!operand.NeedsRelocation());
+ ASSERT(!operand.NeedsRelocation(isolate()));
if (operand.IsImmediate()) {
int64_t immediate = operand.immediate();
ASSERT(IsImmAddSub(immediate));
ASSERT(rd.SizeInBits() == rn.SizeInBits());
ASSERT(rd.SizeInBits() == operand.reg().SizeInBits());
ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
- ASSERT(!operand.NeedsRelocation());
+ ASSERT(!operand.NeedsRelocation(isolate()));
Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
}
#ifdef USE_SIMULATOR
// Don't generate simulator specific code if we are building a snapshot, which
// might be run on real hardware.
- if (!Serializer::enabled()) {
+ if (!Serializer::enabled(isolate())) {
// The arguments to the debug marker need to be contiguous in memory, so
// make sure we don't try to emit pools.
BlockPoolsScope scope(this);
const Operand& operand,
LogicalOp op) {
ASSERT(rd.SizeInBits() == rn.SizeInBits());
- ASSERT(!operand.NeedsRelocation());
+ ASSERT(!operand.NeedsRelocation(isolate()));
if (operand.IsImmediate()) {
int64_t immediate = operand.immediate();
unsigned reg_size = rd.SizeInBits();
Condition cond,
ConditionalCompareOp op) {
Instr ccmpop;
- ASSERT(!operand.NeedsRelocation());
+ ASSERT(!operand.NeedsRelocation(isolate()));
if (operand.IsImmediate()) {
int64_t immediate = operand.immediate();
ASSERT(IsImmConditionalCompare(immediate));
Instr op) {
ASSERT(operand.IsShiftedRegister());
ASSERT(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
- ASSERT(!operand.NeedsRelocation());
+ ASSERT(!operand.NeedsRelocation(isolate()));
Emit(SF(rd) | op | Flags(S) |
ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) |
Rm(operand.reg()) | Rn(rn) | Rd(rd));
const Operand& operand,
FlagsUpdate S,
Instr op) {
- ASSERT(!operand.NeedsRelocation());
+ ASSERT(!operand.NeedsRelocation(isolate()));
Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) |
ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) |
if (!RelocInfo::IsNone(rmode)) {
// Don't record external references unless the heap will be serialized.
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
- if (!Serializer::enabled() && !emit_debug_code()) {
+ if (!Serializer::enabled(isolate()) && !emit_debug_code()) {
return;
}
}
// Relocation information.
RelocInfo::Mode rmode() const { return rmode_; }
void set_rmode(RelocInfo::Mode rmode) { rmode_ = rmode; }
- bool NeedsRelocation() const;
+ bool NeedsRelocation(Isolate* isolate) const;
// Helpers
inline static Operand UntagSmi(Register smi);
return false;
}
- static bool IsSafeForSnapshot(CpuFeature f) {
+ static bool IsSafeForSnapshot(Isolate* isolate, CpuFeature f) {
return (IsSupported(f) &&
- (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
+ (!Serializer::enabled(isolate) || !IsFoundByRuntimeProbingOnly(f)));
}
// I and D cache line size in bytes.
int properties_count = constant_properties->length() / 2;
const int max_cloned_properties =
FastCloneShallowObjectStub::kMaximumClonedProperties;
- if (expr->may_store_doubles() || expr->depth() > 1 || Serializer::enabled() ||
- flags != ObjectLiteral::kFastElements ||
+ if (expr->may_store_doubles() || expr->depth() > 1 ||
+ Serializer::enabled(isolate()) || flags != ObjectLiteral::kFastElements ||
properties_count > max_cloned_properties) {
__ Push(x3, x2, x1, x0);
__ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
__ CallStub(&stub);
__ IncrementCounter(
isolate()->counters()->cow_arrays_created_stub(), 1, x10, x11);
- } else if ((expr->depth() > 1) || Serializer::enabled() ||
+ } else if ((expr->depth() > 1) || Serializer::enabled(isolate()) ||
length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ Mov(x0, Smi::FromInt(flags));
__ Push(x3, x2, x1, x0);
// it was just a plain use), so it is free to move the split child into
// the same register that is used for the use-at-start.
// See https://code.google.com/p/chromium/issues/detail?id=201590
- if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) {
+ if (!(instr->ClobbersRegisters() &&
+ instr->ClobbersDoubleRegisters(isolate()))) {
int fixed = 0;
int used_at_start = 0;
for (UseIterator it(instr); !it.Done(); it.Advance()) {
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
- virtual bool ClobbersDoubleRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+ return IsCall();
+ }
bool IsMarkedAsCall() const { return IsCall(); }
virtual bool HasResult() const = 0;
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
- virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
return save_doubles() == kDontSaveFPRegs;
}
LogicalOp op) {
UseScratchRegisterScope temps(this);
- if (operand.NeedsRelocation()) {
+ if (operand.NeedsRelocation(isolate())) {
Register temp = temps.AcquireX();
LoadRelocated(temp, operand);
Logical(rd, rn, temp, op);
UseScratchRegisterScope temps(this);
Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;
- if (operand.NeedsRelocation()) {
+ if (operand.NeedsRelocation(isolate())) {
LoadRelocated(dst, operand);
} else if (operand.IsImmediate()) {
void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
ASSERT(allow_macro_instructions_);
- if (operand.NeedsRelocation()) {
+ if (operand.NeedsRelocation(isolate())) {
LoadRelocated(rd, operand);
mvn(rd, rd);
Condition cond,
ConditionalCompareOp op) {
ASSERT((cond != al) && (cond != nv));
- if (operand.NeedsRelocation()) {
+ if (operand.NeedsRelocation(isolate())) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
LoadRelocated(temp, operand);
FlagsUpdate S,
AddSubOp op) {
if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
- !operand.NeedsRelocation() && (S == LeaveFlags)) {
+ !operand.NeedsRelocation(isolate()) && (S == LeaveFlags)) {
// The instruction would be a nop. Avoid generating useless code.
return;
}
- if (operand.NeedsRelocation()) {
+ if (operand.NeedsRelocation(isolate())) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
LoadRelocated(temp, operand);
ASSERT(rd.SizeInBits() == rn.SizeInBits());
UseScratchRegisterScope temps(this);
- if (operand.NeedsRelocation()) {
+ if (operand.NeedsRelocation(isolate())) {
Register temp = temps.AcquireX();
LoadRelocated(temp, operand);
AddSubWithCarryMacro(rd, rn, temp, S, op);
#ifdef DEBUG
CpuFeatureScope::CpuFeatureScope(AssemblerBase* assembler, CpuFeature f)
: assembler_(assembler) {
- ASSERT(CpuFeatures::IsSafeForSnapshot(f));
+ ASSERT(CpuFeatures::IsSafeForSnapshot(assembler_->isolate(), f));
old_enabled_ = assembler_->enabled_cpu_features();
uint64_t mask = static_cast<uint64_t>(1) << f;
// TODO(svenpanne) This special case below doesn't belong here!
: isolate_(isolate), old_cross_compile_(CpuFeatures::cross_compile_) {
// CpuFeatures is a global singleton, therefore this is only safe in
// single threaded code.
- ASSERT(Serializer::enabled());
+ ASSERT(Serializer::enabled(isolate));
uint64_t mask = static_cast<uint64_t>(1) << f;
CpuFeatures::cross_compile_ |= mask;
USE(isolate_);
public:
explicit NoTrackDoubleFieldsForSerializerScope(Isolate* isolate)
: isolate_(isolate), flag_(FLAG_track_double_fields) {
- if (Serializer::enabled()) {
+ if (Serializer::enabled(isolate)) {
// Disable tracking double fields because heap numbers are treated as
// immutable by the serializer.
FLAG_track_double_fields = false;
}
- USE(isolate_);
}
+
~NoTrackDoubleFieldsForSerializerScope() {
- if (Serializer::enabled()) {
+ if (Serializer::enabled(isolate_)) {
FLAG_track_double_fields = flag_;
}
}
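A hypothetical usage sketch of the scope above, showing the intended RAII shape: the flag is suppressed only while the serializer is enabled, and the saved value comes back when the scope ends.

{
  NoTrackDoubleFieldsForSerializerScope no_double_tracking(isolate);
  // Allocate objects here without relying on in-object double tracking,
  // since the serializer treats heap numbers as immutable.
}  // Destructor restores FLAG_track_double_fields (serializer case only).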
// We can't (de-)serialize typed arrays currently, but we are lucky: The state
// of the random number generator needs no initialization during snapshot
// creation time and we don't need trigonometric functions then.
- if (!Serializer::enabled()) {
+ if (!Serializer::enabled(isolate)) {
// Initially seed the per-context random number generator using the
// per-isolate random number generator.
const int num_elems = 2;
OffsetBits::encode(offset) |
IsTruncatingBits::encode(is_truncating) |
SkipFastPathBits::encode(skip_fastpath) |
- SSEBits::encode(CpuFeatures::IsSafeForSnapshot(SSE2) ?
- CpuFeatures::IsSafeForSnapshot(SSE3) ? 2 : 1 : 0);
+ SSEBits::encode(
+ CpuFeatures::IsSafeForSnapshot(isolate, SSE2) ?
+ CpuFeatures::IsSafeForSnapshot(isolate, SSE3) ? 2 : 1 : 0);
}
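The nested ternary above packs the highest snapshot-safe SSE level into the stub key bits; the same decision written out with an assumed helper name:

static int HighestSafeSseLevel(bool sse2_safe, bool sse3_safe) {
  if (!sse2_safe) return 0;   // neither SSE2 nor SSE3 may be emitted
  return sse3_safe ? 2 : 1;   // 1: SSE2 only, 2: SSE2 and SSE3
}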
Register source() {
// TODO(yangguo): check whether those heuristics are still up-to-date.
// We do not shrink objects that go into a snapshot (yet), so we adjust
// the estimate conservatively.
- if (Serializer::enabled()) {
+ if (Serializer::enabled(shared->GetIsolate())) {
estimate += 2;
} else if (FLAG_clever_optimizations) {
// Inobject slack tracking will reclaim redundant inobject space later,
// cache in the snapshot to keep boot-time memory usage down.
// If we expand the number string cache already while creating
// the snapshot then that didn't work out.
- ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
+ ASSERT(!Serializer::enabled(isolate()) || FLAG_extra_code != NULL);
Handle<FixedArray> new_cache = NewFixedArray(full_size, TENURED);
isolate()->heap()->set_number_string_cache(*new_cache);
return;
// Skip saved double registers.
if (safepoint_entry.has_doubles()) {
// Number of doubles not known at snapshot time.
- ASSERT(!Serializer::enabled());
+ ASSERT(!Serializer::enabled(isolate()));
parameters_base += DoubleRegister::NumAllocatableRegisters() *
kDoubleSize / kPointerSize;
}
// we disable the production of debug code in the full compiler if we are
// either generating a snapshot or we booted from a snapshot.
generate_debug_code_ = FLAG_debug_code &&
- !Serializer::enabled() &&
+ !Serializer::enabled(isolate()) &&
!Snapshot::HaveASnapshotToStartFrom();
masm_->set_emit_debug_code(generate_debug_code_);
masm_->set_predictable_code_size(true);
return false;
}
- if (!FLAG_incremental_marking || Serializer::enabled()) {
+ if (!FLAG_incremental_marking || Serializer::enabled(isolate_)) {
return IdleGlobalGC();
}
if (call_type == kCallApiFunction) {
// Cannot embed a direct reference to the global proxy map
// as it may be dropped on deserialization.
- CHECK(!Serializer::enabled());
+ CHECK(!Serializer::enabled(isolate()));
ASSERT_EQ(0, receiver_maps->length());
receiver_maps->Add(handle(
function->context()->global_object()->global_receiver()->map()),
if (shared->strict_mode() == SLOPPY && !shared->native()) {
// Cannot embed a direct reference to the global proxy
// as it is dropped on deserialization.
- CHECK(!Serializer::enabled());
+ CHECK(!Serializer::enabled(isolate()));
Handle<JSObject> global_receiver(
target->context()->global_object()->global_receiver());
return Add<HConstant>(global_receiver);
ASSERT(!RelocInfo::IsNone(rmode));
// Don't record external references unless the heap will be serialized.
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
- if (!Serializer::enabled() && !emit_debug_code()) {
+ if (!Serializer::enabled(isolate()) && !emit_debug_code()) {
return;
}
}
return Check(f, found_by_runtime_probing_only_);
}
- static bool IsSafeForSnapshot(CpuFeature f) {
+ static bool IsSafeForSnapshot(Isolate* isolate, CpuFeature f) {
return Check(f, cross_compile_) ||
(IsSupported(f) &&
- (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
+ (!Serializer::enabled(isolate) || !IsFoundByRuntimeProbingOnly(f)));
}
static bool VerifyCrossCompiling() {
void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
- if (Serializer::enabled()) {
+ if (Serializer::enabled(masm->isolate())) {
PlatformFeatureScope sse2(masm->isolate(), SSE2);
Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
} else {
// It is important that the store buffer overflow stubs are generated first.
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
- if (Serializer::enabled()) {
+ if (Serializer::enabled(isolate)) {
PlatformFeatureScope sse2(isolate, SSE2);
BinaryOpICStub::GenerateAheadOfTime(isolate);
BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
Register character,
Register scratch) {
// hash = (seed + character) + ((seed + character) << 10);
- if (Serializer::enabled()) {
+ if (Serializer::enabled(masm->isolate())) {
__ LoadRoot(scratch, Heap::kHashSeedRootIndex);
__ SmiUntag(scratch);
__ add(scratch, character);
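The seeded add and shift being emitted here are the first operations of a Jenkins-style one-at-a-time hash step (the remaining mixing instructions fall outside this hunk); a minimal sketch of that per-character step, under the assumption that the generated code follows the usual pattern:

#include <stdint.h>

static uint32_t AddCharacterToHash(uint32_t hash, uint16_t character) {
  hash += character;       // seed (or running hash) + character
  hash += hash << 10;      // the shifted add shown in the comment above
  hash ^= hash >> 6;       // mixing step
  return hash;
}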
Isolate* isolate) {
StoreBufferOverflowStub stub(isolate, kDontSaveFPRegs);
stub.GetCode();
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ if (CpuFeatures::IsSafeForSnapshot(isolate, SSE2)) {
StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
stub2.GetCode();
}
public:
StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
: PlatformCodeStub(isolate), save_doubles_(save_fp) {
- ASSERT(CpuFeatures::IsSafeForSnapshot(SSE2) || save_fp == kDontSaveFPRegs);
+ ASSERT(CpuFeatures::IsSafeForSnapshot(isolate, SSE2) ||
+ save_fp == kDontSaveFPRegs);
}
void Generate(MacroAssembler* masm);
regs_(object, // An input reg.
address, // An input reg.
value) { // One scratch reg.
- ASSERT(CpuFeatures::IsSafeForSnapshot(SSE2) || fp_mode == kDontSaveFPRegs);
+ ASSERT(CpuFeatures::IsSafeForSnapshot(isolate, SSE2) ||
+ fp_mode == kDontSaveFPRegs);
}
enum Mode {
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
int properties_count = constant_properties->length() / 2;
- if (expr->may_store_doubles() || expr->depth() > 1 || Serializer::enabled() ||
+ if (expr->may_store_doubles() || expr->depth() > 1 ||
+ Serializer::enabled(isolate()) ||
flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
allocation_site_mode,
length);
__ CallStub(&stub);
- } else if (expr->depth() > 1 || Serializer::enabled() ||
+ } else if (expr->depth() > 1 || Serializer::enabled(isolate()) ||
length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
namespace internal {
-static SaveFPRegsMode GetSaveFPRegsMode() {
+static SaveFPRegsMode GetSaveFPRegsMode(Isolate* isolate) {
// We don't need to save floating point regs when generating the snapshot
- return CpuFeatures::IsSafeForSnapshot(SSE2) ? kSaveFPRegs : kDontSaveFPRegs;
+ return CpuFeatures::IsSafeForSnapshot(isolate, SSE2) ? kSaveFPRegs
+ : kDontSaveFPRegs;
}
x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
} else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
!instr->IsGap() && !instr->IsReturn()) {
- if (instr->ClobbersDoubleRegisters()) {
+ if (instr->ClobbersDoubleRegisters(isolate())) {
if (instr->HasDoubleRegisterResult()) {
ASSERT_EQ(1, x87_stack_.depth());
} else {
void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) {
- if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters()) {
+ if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters(isolate())) {
bool double_inputs = instr->HasDoubleRegisterInput();
// Flush stack from tos down, since FreeX87() will mess with tos
int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
ASSERT(instr->result()->IsDoubleRegister());
- if (!CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ if (!CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) {
__ push(Immediate(upper));
__ push(Immediate(lower));
X87Register reg = ToX87Register(instr->result());
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister left = ToDoubleRegister(instr->left());
XMMRegister right = ToDoubleRegister(instr->right());
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
factory()->heap_number_map());
__ j(not_equal, ¬_heap_number, Label::kNear);
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
EmitGoto(next_block);
} else {
if (instr->is_double()) {
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
__ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
} else {
offset,
value,
temp,
- GetSaveFPRegsMode(),
+ GetSaveFPRegsMode(isolate()),
EMIT_REMEMBERED_SET,
check_needed);
}
HeapObject::kMapOffset,
temp_map,
temp,
- GetSaveFPRegsMode(),
+ GetSaveFPRegsMode(isolate()),
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
offset,
value,
temp,
- GetSaveFPRegsMode(),
+ GetSaveFPRegsMode(isolate()),
EMIT_REMEMBERED_SET,
check_needed);
}
instr->additional_index()));
if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
elements_kind == FLOAT32_ELEMENTS) {
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister xmm_scratch = double_scratch0();
__ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
}
} else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
elements_kind == FLOAT64_ELEMENTS) {
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
__ movsd(operand, ToDoubleRegister(instr->value()));
} else {
FixedDoubleArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister value = ToDoubleRegister(instr->value());
__ RecordWrite(elements,
key,
value,
- GetSaveFPRegsMode(),
+ GetSaveFPRegsMode(isolate()),
EMIT_REMEMBERED_SET,
check_needed);
}
Register result_reg = ToRegister(result);
if (instr->truncating()) {
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(input);
__ TruncateDoubleToI(result_reg, input_reg);
}
} else {
Label bailout, done;
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(input);
XMMRegister xmm_scratch = double_scratch0();
Register result_reg = ToRegister(result);
Label bailout, done;
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(input);
XMMRegister xmm_scratch = double_scratch0();
}
MacroAssembler* masm() const { return masm_; }
+ Isolate* isolate() const { return masm_->isolate(); }
private:
int ArrayIndex(X87Register reg);
// it was just a plain use), so it is free to move the split child into
// the same register that is used for the use-at-start.
// See https://code.google.com/p/chromium/issues/detail?id=201590
- if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) {
+ if (!(instr->ClobbersRegisters() &&
+ instr->ClobbersDoubleRegisters(isolate()))) {
int fixed = 0;
int used_at_start = 0;
for (UseIterator it(instr); !it.Done(); it.Advance()) {
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
- if (!CpuFeatures::IsSafeForSnapshot(SSE2) && instr->IsGoto() &&
+ if (!CpuFeatures::IsSafeForSnapshot(isolate(), SSE2) && instr->IsGoto() &&
LGoto::cast(instr)->jumps_to_join()) {
// TODO(olivf) Since phis of spilled values are joined as registers
// (not in the stack slot), we need to allow the goto gaps to keep one
// x87 register alive. To ensure all other values are still spilled, we
// insert a fpu register barrier right before.
- LClobberDoubles* clobber = new(zone()) LClobberDoubles();
+ LClobberDoubles* clobber = new(zone()) LClobberDoubles(isolate());
clobber->set_hydrogen_value(current);
chunk_->AddInstruction(clobber, current_block_);
}
LOperand* value = UseRegister(val);
bool truncating = instr->CanTruncateToInt32();
LOperand* xmm_temp =
- (CpuFeatures::IsSafeForSnapshot(SSE2) && !truncating)
+ (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2) && !truncating)
? FixedTemp(xmm1) : NULL;
LInstruction* result =
DefineSameAsFirst(new(zone()) LTaggedToI(value, xmm_temp));
} else {
ASSERT(to.IsInteger32());
bool truncating = instr->CanTruncateToInt32();
- bool needs_temp = CpuFeatures::IsSafeForSnapshot(SSE2) && !truncating;
+ bool needs_temp =
+ CpuFeatures::IsSafeForSnapshot(isolate(), SSE2) && !truncating;
LOperand* value = needs_temp ? UseTempRegister(val) : UseRegister(val);
LOperand* temp = needs_temp ? TempRegister() : NULL;
LInstruction* result =
return UseFixed(instr->value(), eax);
}
- if (!CpuFeatures::IsSafeForSnapshot(SSE2) &&
+ if (!CpuFeatures::IsSafeForSnapshot(isolate(), SSE2) &&
IsDoubleOrFloatElementsKind(elements_kind)) {
return UseRegisterAtStart(instr->value());
}
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
- virtual bool ClobbersDoubleRegisters() const {
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
return IsCall() ||
// We only have rudimentary X87Stack tracking, thus in general
// cannot handle phi-nodes.
- (!CpuFeatures::IsSafeForSnapshot(SSE2) && IsControl());
+ (!CpuFeatures::IsSafeForSnapshot(isolate, SSE2) && IsControl());
}
virtual bool HasResult() const = 0;
class LClobberDoubles V8_FINAL : public LTemplateInstruction<0, 0, 0> {
public:
- LClobberDoubles() { ASSERT(!CpuFeatures::IsSafeForSnapshot(SSE2)); }
+ explicit LClobberDoubles(Isolate* isolate) {
+ ASSERT(!CpuFeatures::IsSafeForSnapshot(isolate, SSE2));
+ }
- virtual bool ClobbersDoubleRegisters() const { return true; }
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+ return true;
+ }
DECLARE_CONCRETE_INSTRUCTION(ClobberDoubles, "clobber-d")
};
virtual bool IsControl() const V8_OVERRIDE { return true; }
int block_id() const { return block_->block_id(); }
- virtual bool ClobbersDoubleRegisters() const { return false; }
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+ return false;
+ }
bool jumps_to_join() const { return block_->predecessors()->length() > 1; }
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
- virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
return save_doubles() == kDontSaveFPRegs;
}
next_block_(NULL),
allocator_(allocator) { }
+ Isolate* isolate() const { return graph_->isolate(); }
+
// Build the sequence for the graph.
LPlatformChunk* Build();
isolate()->factory()->heap_number_map());
j(not_equal, lost_precision, Label::kNear);
- if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ if (CpuFeatures::IsSafeForSnapshot(isolate(), SSE2)) {
ASSERT(!temp.is(no_xmm_reg));
CpuFeatureScope scope(this, SSE2);
// Note: r0 will contain hash code
void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
// Xor original key with a seed.
- if (Serializer::enabled()) {
+ if (Serializer::enabled(isolate())) {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
mov(scratch, Immediate(Heap::kHashSeedRootIndex));
JSGlobalObject::EnsurePropertyCell(global, name);
ASSERT(cell->value()->IsTheHole());
Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value();
- if (Serializer::enabled()) {
+ if (Serializer::enabled(masm->isolate())) {
__ mov(scratch, Immediate(cell));
__ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
Immediate(the_hole));
HandlerFrontendHeader(type, receiver(), global, name, &miss);
// Get the value from the cell.
- if (Serializer::enabled()) {
+ if (Serializer::enabled(isolate())) {
__ mov(eax, Immediate(cell));
__ mov(eax, FieldOperand(eax, PropertyCell::kValueOffset));
} else {
ExtraICState BinaryOpIC::State::GetExtraICState() const {
bool sse2 = (Max(result_kind_, Max(left_kind_, right_kind_)) > SMI &&
- CpuFeatures::IsSafeForSnapshot(SSE2));
+ CpuFeatures::IsSafeForSnapshot(isolate(), SSE2));
ExtraICState extra_ic_state =
SSE2Field::encode(sse2) |
OpField::encode(op_ - FIRST_TOKEN) |
return FLAG_incremental_marking &&
FLAG_incremental_marking_steps &&
heap_->gc_state() == Heap::NOT_IN_GC &&
- !Serializer::enabled() &&
+ !Serializer::enabled(heap_->isolate()) &&
heap_->isolate()->IsInitialized() &&
heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}
ASSERT(FLAG_incremental_marking_steps);
ASSERT(state_ == STOPPED);
ASSERT(heap_->gc_state() == Heap::NOT_IN_GC);
- ASSERT(!Serializer::enabled());
+ ASSERT(!Serializer::enabled(heap_->isolate()));
ASSERT(heap_->isolate()->IsInitialized());
ResetStepCounters();
has_fatal_error_ = false;
use_crankshaft_ = FLAG_crankshaft
- && !Serializer::enabled()
+ && !Serializer::enabled(this)
&& CpuFeatures::SupportsCrankshaft();
if (function_entry_hook() != NULL) {
kDeoptTableSerializeEntryCount - 1);
}
- if (!Serializer::enabled()) {
+ if (!Serializer::enabled(this)) {
// Ensure that all stubs which need to be generated ahead of time, but
// cannot be serialized into the snapshot have been generated.
HandleScope scope(this);
}
}
- if (instr->ClobbersDoubleRegisters()) {
+ if (instr->ClobbersDoubleRegisters(isolate())) {
for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
if (output == NULL || !output->IsDoubleRegister() ||
output->index() != i) {
if (!RelocInfo::IsNone(rinfo.rmode())) {
// Don't record external references unless the heap will be serialized.
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
- if (!Serializer::enabled() && !emit_debug_code()) {
+ if (!Serializer::enabled(isolate()) && !emit_debug_code()) {
return;
}
}
return Check(f, found_by_runtime_probing_only_);
}
- static bool IsSafeForSnapshot(CpuFeature f) {
+ static bool IsSafeForSnapshot(Isolate* isolate, CpuFeature f) {
return Check(f, cross_compile_) ||
(IsSupported(f) &&
- (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
+ (!Serializer::enabled(isolate) || !IsFoundByRuntimeProbingOnly(f)));
}
static bool VerifyCrossCompiling() {
#if defined(V8_HOST_ARCH_MIPS)
OS::MemCopyUint8Function CreateMemCopyUint8Function(
- OS::MemCopyUint8Function stub) {
+ bool serializer_enabled,
+ OS::MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
- if (Serializer::enabled()) {
- return stub;
- }
+ if (serializer_enabled) return stub;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(3 * KB, &actual_size, true));
: ObjectLiteral::kNoFlags;
__ li(a0, Operand(Smi::FromInt(flags)));
int properties_count = constant_properties->length() / 2;
- if (expr->may_store_doubles() || expr->depth() > 1 || Serializer::enabled() ||
- flags != ObjectLiteral::kFastElements ||
+ if (expr->may_store_doubles() || expr->depth() > 1 ||
+ Serializer::enabled(isolate()) || flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ Push(a3, a2, a1, a0);
__ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
__ CallStub(&stub);
__ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(),
1, a1, a2);
- } else if (expr->depth() > 1 || Serializer::enabled() ||
+ } else if (expr->depth() > 1 || Serializer::enabled(isolate()) ||
length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ li(a0, Operand(Smi::FromInt(flags)));
__ Push(a3, a2, a1, a0);
// it was just a plain use), so it is free to move the split child into
// the same register that is used for the use-at-start.
// See https://code.google.com/p/chromium/issues/detail?id=201590
- if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) {
+ if (!(instr->ClobbersRegisters() &&
+ instr->ClobbersDoubleRegisters(isolate()))) {
int fixed = 0;
int used_at_start = 0;
for (UseIterator it(instr); !it.Done(); it.Advance()) {
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
- virtual bool ClobbersDoubleRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+ return IsCall();
+ }
// Interface to the register allocator and iterators.
bool IsMarkedAsCall() const { return IsCall(); }
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
- virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
return save_doubles() == kDontSaveFPRegs;
}
next_block_(NULL),
allocator_(allocator) { }
+ Isolate* isolate() const { return graph_->isolate(); }
+
// Build the sequence for the graph.
LPlatformChunk* Build();
if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
&& (target->ic_state() == MEGAMORPHIC || target->ic_state() == GENERIC ||
target->ic_state() == POLYMORPHIC || heap->flush_monomorphic_ics() ||
- Serializer::enabled() || target->ic_age() != heap->global_ic_age() ||
+ Serializer::enabled(heap->isolate()) ||
+ target->ic_age() != heap->global_ic_age() ||
target->is_invalidated_weak_stub())) {
- IC::Clear(target->GetIsolate(), rinfo->pc(),
- rinfo->host()->constant_pool());
+ IC::Clear(heap->isolate(), rinfo->pc(), rinfo->host()->constant_pool());
target = Code::GetCodeFromTargetAddress(rinfo->target_address());
}
heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
if (FLAG_cleanup_code_caches_at_gc) {
code->ClearTypeFeedbackInfo(heap);
}
- if (FLAG_age_code && !Serializer::enabled()) {
+ if (FLAG_age_code && !Serializer::enabled(heap->isolate())) {
code->MakeOlder(heap->mark_compact_collector()->marking_parity());
}
code->CodeIterateBody<StaticVisitor>(heap);
set_live_objects_may_exist(true);
// No tracking during the snapshot construction phase.
- if (Serializer::enabled()) return;
+ Isolate* isolate = GetIsolate();
+ if (Serializer::enabled(isolate)) return;
if (map->unused_property_fields() == 0) return;
set_construction_count(kGenerousAllocationCount);
}
set_initial_map(map);
- Builtins* builtins = map->GetHeap()->isolate()->builtins();
+ Builtins* builtins = isolate->builtins();
ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubGeneric),
construct_stub());
set_construct_stub(builtins->builtin(Builtins::kJSConstructStubCountdown));
&OS::MemCopyUint16Uint8Wrapper;
// Defined in codegen-arm.cc.
OS::MemCopyUint8Function CreateMemCopyUint8Function(
+ bool serializer_enabled,
OS::MemCopyUint8Function stub);
OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
OS::MemCopyUint16Uint8Function stub);
OS::MemCopyUint8Function OS::memcopy_uint8_function = &OS::MemCopyUint8Wrapper;
// Defined in codegen-mips.cc.
OS::MemCopyUint8Function CreateMemCopyUint8Function(
+ bool serializer_enabled,
OS::MemCopyUint8Function stub);
#endif
-void OS::PostSetUp() {
+void OS::PostSetUp(bool serializer_enabled) {
#if V8_TARGET_ARCH_IA32
OS::MemMoveFunction generated_memmove = CreateMemMoveFunction();
if (generated_memmove != NULL) {
}
#elif defined(V8_HOST_ARCH_ARM)
OS::memcopy_uint8_function =
- CreateMemCopyUint8Function(&OS::MemCopyUint8Wrapper);
+ CreateMemCopyUint8Function(serializer_enabled, &OS::MemCopyUint8Wrapper);
OS::memcopy_uint16_uint8_function =
CreateMemCopyUint16Uint8Function(&OS::MemCopyUint16Uint8Wrapper);
#elif defined(V8_HOST_ARCH_MIPS)
OS::memcopy_uint8_function =
- CreateMemCopyUint8Function(&OS::MemCopyUint8Wrapper);
+ CreateMemCopyUint8Function(serializer_enabled, &OS::MemCopyUint8Wrapper);
#endif
// fast_exp is initialized lazily.
init_fast_sqrt_function();
}
-void OS::PostSetUp() {
+void OS::PostSetUp(bool serializer_enabled) {
// Math functions depend on CPU features therefore they are initialized after
// CPU.
MathSetup();
public:
// Initializes the platform OS support that depends on CPU features. This is
// called after CPU initialization.
- static void PostSetUp();
+ static void PostSetUp(bool serializer_enabled);
// Returns the accumulated user time for thread. This routine
// can be used for profiling. The implementation should
// deserialized objects.
void SerializerDeserializer::Iterate(Isolate* isolate,
ObjectVisitor* visitor) {
- if (Serializer::enabled()) return;
+ if (Serializer::enabled(isolate)) return;
for (int i = 0; ; i++) {
if (isolate->serialize_partial_snapshot_cache_length() <= i) {
// Extend the array ready to get a value from the visitor when
static void InitializeOncePerProcess();
static void TearDown();
- static bool enabled() {
+ static bool enabled(Isolate* isolate) {
SerializationState state = static_cast<SerializationState>(
NoBarrier_Load(&serialization_state_));
ASSERT(state != SERIALIZER_STATE_UNINITIALIZED);
platform_ = new DefaultPlatform;
#endif
Sampler::SetUp();
- CpuFeatures::Probe(Serializer::enabled());
- OS::PostSetUp();
+ // TODO(svenpanne) Clean this up when Serializer is a real object.
+ bool serializer_enabled = Serializer::enabled(NULL);
+ CpuFeatures::Probe(serializer_enabled);
+ OS::PostSetUp(serializer_enabled);
ElementsAccessor::InitializeOncePerProcess();
LOperand::SetUpCaches();
SetUpJSCallerSavedCodeData();
ASSERT(!RelocInfo::IsNone(rmode));
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
// Don't record external references unless the heap will be serialized.
- if (!Serializer::enabled() && !emit_debug_code()) {
+ if (!Serializer::enabled(isolate()) && !emit_debug_code()) {
return;
}
} else if (rmode == RelocInfo::CODE_AGE_SEQUENCE) {
return Check(f, found_by_runtime_probing_only_);
}
- static bool IsSafeForSnapshot(CpuFeature f) {
+ static bool IsSafeForSnapshot(Isolate* isolate, CpuFeature f) {
return Check(f, cross_compile_) ||
(IsSupported(f) &&
- (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
+ (!Serializer::enabled(isolate) || !IsFoundByRuntimeProbingOnly(f)));
}
static bool VerifyCrossCompiling() {
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
int properties_count = constant_properties->length() / 2;
- if (expr->may_store_doubles() || expr->depth() > 1 || Serializer::enabled() ||
- flags != ObjectLiteral::kFastElements ||
+ if (expr->may_store_doubles() || expr->depth() > 1 ||
+ Serializer::enabled(isolate()) || flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ Push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
allocation_site_mode,
length);
__ CallStub(&stub);
- } else if (expr->depth() > 1 || Serializer::enabled() ||
+ } else if (expr->depth() > 1 || Serializer::enabled(isolate()) ||
length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ Push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
// it was just a plain use), so it is free to move the split child into
// the same register that is used for the use-at-start.
// See https://code.google.com/p/chromium/issues/detail?id=201590
- if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) {
+ if (!(instr->ClobbersRegisters() &&
+ instr->ClobbersDoubleRegisters(isolate()))) {
int fixed = 0;
int used_at_start = 0;
for (UseIterator it(instr); !it.Done(); it.Advance()) {
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return IsCall(); }
bool ClobbersRegisters() const { return IsCall(); }
- virtual bool ClobbersDoubleRegisters() const { return IsCall(); }
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+ return IsCall();
+ }
virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
- virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+ virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
return save_doubles() == kDontSaveFPRegs;
}
next_block_(NULL),
allocator_(allocator) { }
+ Isolate* isolate() const { return graph_->isolate(); }
+
// Build the sequence for the graph.
LPlatformChunk* Build();
Operand MacroAssembler::ExternalOperand(ExternalReference target,
Register scratch) {
- if (root_array_available_ && !Serializer::enabled()) {
+ if (root_array_available_ && !Serializer::enabled(isolate())) {
int64_t delta = RootRegisterDelta(target);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
return Operand(kRootRegister, static_cast<int32_t>(delta));
void MacroAssembler::Load(Register destination, ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled()) {
+ if (root_array_available_ && !Serializer::enabled(isolate())) {
int64_t delta = RootRegisterDelta(source);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
void MacroAssembler::Store(ExternalReference destination, Register source) {
- if (root_array_available_ && !Serializer::enabled()) {
+ if (root_array_available_ && !Serializer::enabled(isolate())) {
int64_t delta = RootRegisterDelta(destination);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
void MacroAssembler::LoadAddress(Register destination,
ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled()) {
+ if (root_array_available_ && !Serializer::enabled(isolate())) {
int64_t delta = RootRegisterDelta(source);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
int MacroAssembler::LoadAddressSize(ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled()) {
+ if (root_array_available_ && !Serializer::enabled(isolate())) {
// This calculation depends on the internals of LoadAddress.
// Its correctness is ensured by the asserts in the Call
// instruction below.
void MacroAssembler::PushAddress(ExternalReference source) {
int64_t address = reinterpret_cast<int64_t>(source.address());
- if (is_int32(address) && !Serializer::enabled()) {
+ if (is_int32(address) && !Serializer::enabled(isolate())) {
if (emit_debug_code()) {
Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
}
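A minimal sketch of the guard these x64 helpers share, with assumed names (the real code uses RootRegisterDelta and kInvalidRootRegisterDelta): when the serializer is off, an external reference has a fixed address, so it can be reached as a 32-bit displacement off kRootRegister; under serialization the address may move, so the full-width form is kept.

#include <stdint.h>

static const int64_t kInvalidDelta = -1;  // assumed sentinel value

static bool CanUseRootRelativeAccess(int64_t delta, bool serializer_enabled) {
  return !serializer_enabled &&
         delta != kInvalidDelta &&
         delta >= INT32_MIN && delta <= INT32_MAX;  // fits in a disp32
}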
Condition cc,
Label* branch,
Label::Distance distance) {
- if (Serializer::enabled()) {
+ if (Serializer::enabled(isolate())) {
// Can't do arithmetic on external references if it might get serialized.
// The mask isn't really an address. We load it as an external reference in
// case the size of the new space is different between the snapshot maker