// Overrides the default provided by FLAG_debug_code.
void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+ // Dummy for cross-platform compatibility.
+ void set_predictable_code_size(bool value) { }
+
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
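// Illustrative sketch (editorial, not part of this patch): the usual
// GetCode() pattern, assuming a live Isolate* isolate and an assembler masm
// with pending code. Calling GetCode() again with no intervening emission
// yields the same descriptor.
//
//   CodeDesc desc;
//   masm.GetCode(&desc);
//   Handle<Code> code = isolate->factory()->NewCode(
//       desc, Code::ComputeFlags(Code::STUB), masm.CodeObject());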
// The variable in the declaration always resides in the current function
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
// Check that we're not inside a with or catch context.
__ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
__ CompareRoot(r1, Heap::kWithContextMapRootIndex);
// in harmony mode.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, r1);
- if (FLAG_debug_code && op == Token::INIT_LET) {
+ if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
__ ldr(r2, location);
__ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- if (FLAG_debug_code) __ AbortIfSmi(r0);
+ if (generate_debug_code_) __ AbortIfSmi(r0);
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(ip, FieldMemOperand(r1, Map::kBitField2Offset));
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
__ AbortIfNotString(r0);
}
// string_length: Accumulated sum of string lengths (smi).
// element: Current array element.
// elements_end: Array end.
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
__ cmp(array_length, Operand(0));
__ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin");
}
shared->set_dont_inline(lit->flags()->Contains(kDontInline));
shared->set_ast_node_count(lit->ast_node_count());
- if (V8::UseCrankshaft()&&
+ if (V8::UseCrankshaft() &&
!function.is_null() &&
!shared->optimization_disabled()) {
// If we're asked to always optimize, we compile the optimized
#include "prettyprinter.h"
#include "scopes.h"
#include "scopeinfo.h"
+#include "snapshot.h"
#include "stub-cache.h"
namespace v8 {
}
+void FullCodeGenerator::Initialize() {
+ // The generation of debug code must match between the snapshot code and the
+ // code that is generated later. This is assumed by the debugger when it is
+ // calculating PC offsets after generating a debug version of code. Therefore
+ // we disable the production of debug code in the full compiler if we are
+ // generating a snapshot or if we booted from one.
+ generate_debug_code_ = FLAG_debug_code &&
+ !Serializer::enabled() &&
+ !Snapshot::HaveASnapshotToStartFrom();
+ masm_->set_emit_debug_code(generate_debug_code_);
+ masm_->set_predictable_code_size(true);
+}
+
+
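// Editorial summary (not part of this patch): the value Initialize() assigns
// to generate_debug_code_, assuming FLAG_debug_code is true.
//
//   building a snapshot  (Serializer::enabled())                 -> false
//   booted from snapshot (Snapshot::HaveASnapshotToStartFrom())  -> false
//   neither                                                      -> true
//
// In the first two cases debug code is suppressed so that PC offsets match
// between snapshot code and any later recompilation of the same function.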
void FullCodeGenerator::PopulateTypeFeedbackCells(Handle<Code> code) {
if (type_feedback_cells_.is_empty()) return;
int length = type_feedback_cells_.length();
? info->function()->ast_node_count() : 0,
info->zone()),
ic_total_count_(0),
- zone_(info->zone()) { }
+ zone_(info->zone()) {
+ Initialize();
+ }
+
+ void Initialize();
static bool MakeCode(CompilationInfo* info);
int ic_total_count_;
Handle<FixedArray> handler_table_;
Handle<JSGlobalPropertyCell> profiling_counter_;
+ bool generate_debug_code_;
Zone* zone_;
friend class NestedStatement;
// Overrides the default provided by FLAG_debug_code.
void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+ // Avoids using instructions that vary in size in unpredictable ways between
+ // the snapshot and the running VM. This is needed by the full compiler so
+ // that it can recompile code with debug support and fix the PC.
+ void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
+
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
protected:
bool emit_debug_code() const { return emit_debug_code_; }
+ bool predictable_code_size() const { return predictable_code_size_; }
void movsd(XMMRegister dst, const Operand& src);
void movsd(const Operand& dst, XMMRegister src);
PositionsRecorder positions_recorder_;
bool emit_debug_code_;
+ bool predictable_code_size_;
friend class PositionsRecorder;
};
// The variable in the declaration always resides in the current function
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
// Check that we're not inside a with or catch context.
__ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
__ cmp(ebx, isolate()->factory()->with_context_map());
// in harmony mode.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, ecx);
- if (FLAG_debug_code && op == Token::INIT_LET) {
+ if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
__ mov(edx, location);
__ cmp(edx, isolate()->factory()->the_hole_value());
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- if (FLAG_debug_code) __ AbortIfSmi(eax);
+ if (generate_debug_code_) __ AbortIfSmi(eax);
// Check whether this map has already been checked to be safe for default
// valueOf.
__ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ bind(&exit);
- if (FLAG_debug_code) __ AbortIfNotSmi(eax);
+ if (generate_debug_code_) __ AbortIfNotSmi(eax);
context()->Plug(eax);
}
VisitForAccumulatorValue(args->at(0));
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
__ AbortIfNotString(eax);
}
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
__ AbortIfNotString(eax);
}
// Loop condition: while (index < length).
// Live loop registers: index, array_length, string,
// scratch, string_length, elements.
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
__ cmp(index, array_length);
__ Assert(less, "No empty arrays here in EmitFastAsciiArrayJoin");
}
SaveFPRegsMode save_fp,
MacroAssembler::RememberedSetFinalAction and_then) {
Label done;
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
Label ok;
JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
int3();
return;
}
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
Label ok;
cmp(value, Operand(address, 0));
j(equal, &ok, Label::kNear);
test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
j(not_zero, &done, Label::kNear);
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Check for impossible bit pattern.
Label ok;
push(mask_scratch);
and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
length);
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
Check(less_equal, "Live Bytes Count overflow chunk size");
// Overrides the default provided by FLAG_debug_code.
void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+ // Dummy for cross-platform compatibility.
+ void set_predictable_code_size(bool value) { }
+
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
// The variable in the declaration always resides in the current function
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
// Check that we're not inside a with or catch context.
__ lw(a1, FieldMemOperand(cp, HeapObject::kMapOffset));
__ LoadRoot(t0, Heap::kWithContextMapRootIndex);
// in harmony mode.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, a1);
- if (FLAG_debug_code && op == Token::INIT_LET) {
+ if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
__ lw(a2, location);
__ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- if (FLAG_debug_code) __ AbortIfSmi(v0);
+ if (generate_debug_code_) __ AbortIfSmi(v0);
__ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
__ lbu(t0, FieldMemOperand(a1, Map::kBitField2Offset));
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
__ AbortIfNotString(v0);
}
// string_length: Accumulated sum of string lengths (smi).
// element: Current array element.
// elements_end: Array end.
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
__ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin",
array_length, Operand(zero_reg));
}
UNCLASSIFIED,
47,
"date_cache_stamp");
+ Add(ExternalReference::address_of_pending_message_obj(isolate).address(),
+ UNCLASSIFIED,
+ 48,
+ "address_of_pending_message_obj");
+ Add(ExternalReference::address_of_has_pending_message(isolate).address(),
+ UNCLASSIFIED,
+ 49,
+ "address_of_has_pending_message");
+ Add(ExternalReference::address_of_pending_message_script(isolate).address(),
+ UNCLASSIFIED,
+ 50,
+ "pending_message_script");
}
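// Editorial note (not part of this patch): the numeric ids (here 48-50,
// following the existing 47) identify external references in serialized
// snapshots, so new entries are appended with fresh ids rather than
// renumbering existing ones.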
: AssemblerBase(arg_isolate),
code_targets_(100),
positions_recorder_(this),
- emit_debug_code_(FLAG_debug_code) {
+ emit_debug_code_(FLAG_debug_code),
+ predictable_code_size_(false) {
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
const int long_size = 6;
int offs = L->pos() - pc_offset();
ASSERT(offs <= 0);
- if (is_int8(offs - short_size)) {
+ // Determine whether we can use 1-byte offsets for backwards branches,
+ // which have a max range of 128 bytes.
+
+ // We also need to check the predictable_code_size_ flag here, because
+ // on x64, when the full code generator recompiles code for debugging, some
+ // places need to be padded out to a certain size. The debugger keeps track
+ // of how much padding was inserted so that it can adjust the return
+ // addresses on the stack, but if the size of jump instructions can also
+ // change, that bookkeeping is not enough and the calculated offsets would
+ // be incorrect.
+ if (is_int8(offs - short_size) && !predictable_code_size_) {
// 0111 tttn #8-bit disp.
emit(0x70 | cc);
emit((offs - short_size) & 0xFF);
if (L->is_bound()) {
int offs = L->pos() - pc_offset() - 1;
ASSERT(offs <= 0);
- if (is_int8(offs - short_size)) {
+ if (is_int8(offs - short_size) && !predictable_code_size_) {
// 1110 1011 #8-bit disp.
emit(0xEB);
emit((offs - short_size) & 0xFF);
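// Worked size comparison (editorial note, not part of this patch), using the
// Intel encodings emitted above:
//
//   jcc short : 0x70|cc disp8        -> 2 bytes
//   jcc near  : 0x0F 0x80|cc disp32  -> 6 bytes (long_size above)
//   jmp short : 0xEB disp8           -> 2 bytes
//   jmp near  : 0xE9 disp32          -> 5 bytes
//
// Skipping the short forms under predictable_code_size_ keeps each branch the
// same length across recompilations, so the debugger's padding bookkeeping
// stays valid.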
// Overrides the default provided by FLAG_debug_code.
void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+ // Avoids using instructions that vary in size in unpredictable ways between
+ // the snapshot and the running VM. This is needed by the full compiler so
+ // that it can recompile code with debug support and fix the PC.
+ void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
+
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
protected:
bool emit_debug_code() const { return emit_debug_code_; }
+ bool predictable_code_size() const { return predictable_code_size_; }
private:
byte* addr_at(int pos) { return buffer_ + pos; }
PositionsRecorder positions_recorder_;
bool emit_debug_code_;
+ bool predictable_code_size_;
friend class PositionsRecorder;
};
// The variable in the declaration always resides in the current function
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
// Check that we're not inside a with or catch context.
__ movq(rbx, FieldOperand(rsi, HeapObject::kMapOffset));
__ CompareRoot(rbx, Heap::kWithContextMapRootIndex);
// in harmony mode.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, rcx);
- if (FLAG_debug_code && op == Token::INIT_LET) {
+ if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
__ movq(rdx, location);
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- if (FLAG_debug_code) __ AbortIfSmi(rax);
+ if (generate_debug_code_) __ AbortIfSmi(rax);
// Check whether this map has already been checked to be safe for default
// valueOf.
__ movq(rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ bind(&exit);
- if (FLAG_debug_code) __ AbortIfNotSmi(rax);
+ if (generate_debug_code_) __ AbortIfNotSmi(rax);
context()->Plug(rax);
}
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
__ AbortIfNotString(rax);
}
// Loop condition: while (index < array_length).
// Live loop registers: index(int32), array_length(int32), string(String*),
// scratch, string_length(int32), elements(FixedArray*).
- if (FLAG_debug_code) {
+ if (generate_debug_code_) {
__ cmpq(index, array_length);
__ Assert(below, "No empty arrays here in EmitFastAsciiArrayJoin");
}
}
-static intptr_t RootRegisterDelta(ExternalReference other, Isolate* isolate) {
+static const int kInvalidRootRegisterDelta = -1;
+
+
+intptr_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
+ if (predictable_code_size() &&
+ (other.address() < reinterpret_cast<Address>(isolate()) ||
+ other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
+ return kInvalidRootRegisterDelta;
+ }
Address roots_register_value = kRootRegisterBias +
- reinterpret_cast<Address>(isolate->heap()->roots_array_start());
+ reinterpret_cast<Address>(isolate()->heap()->roots_array_start());
intptr_t delta = other.address() - roots_register_value;
return delta;
}
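// Editorial note (not part of this patch): when RootRegisterDelta() returns
// kInvalidRootRegisterDelta, the callers below fall back to materializing the
// full 64-bit address, whose encoding differs in length from the
// root-register-relative form. Addresses inside the Isolate keep the same
// offset from the roots array in every run, so only those stay eligible for
// the short form under predictable_code_size().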
Operand MacroAssembler::ExternalOperand(ExternalReference target,
Register scratch) {
if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(target, isolate());
- if (is_int32(delta)) {
+ intptr_t delta = RootRegisterDelta(target);
+ if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
Serializer::TooLateToEnableNow();
return Operand(kRootRegister, static_cast<int32_t>(delta));
}
void MacroAssembler::Load(Register destination, ExternalReference source) {
if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(source, isolate());
- if (is_int32(delta)) {
+ intptr_t delta = RootRegisterDelta(source);
+ if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
Serializer::TooLateToEnableNow();
movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
return;
void MacroAssembler::Store(ExternalReference destination, Register source) {
if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(destination, isolate());
- if (is_int32(delta)) {
+ intptr_t delta = RootRegisterDelta(destination);
+ if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
Serializer::TooLateToEnableNow();
movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
return;
void MacroAssembler::LoadAddress(Register destination,
ExternalReference source) {
if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(source, isolate());
- if (is_int32(delta)) {
+ intptr_t delta = RootRegisterDelta(source);
+ if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
Serializer::TooLateToEnableNow();
lea(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
return;
// This calculation depends on the internals of LoadAddress.
// Its correctness is ensured by the asserts in the Call
// instruction below.
- intptr_t delta = RootRegisterDelta(source, isolate());
- if (is_int32(delta)) {
+ intptr_t delta = RootRegisterDelta(source);
+ if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
Serializer::TooLateToEnableNow();
// Operand is lea(scratch, Operand(kRootRegister, delta));
// Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
Register scratch,
SaveFPRegsMode save_fp,
RememberedSetFinalAction and_then) {
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
Label ok;
JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
int3();
return;
}
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
Label ok;
cmpq(value, Operand(address, 0));
j(equal, &ok, Label::kNear);
int min_length,
Register scratch) {
ASSERT(min_length >= 0);
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
cmpl(length, Immediate(min_length));
Assert(greater_equal, "Invalid min_length");
}
testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
j(not_zero, &done, Label::kNear);
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Check for impossible bit pattern.
Label ok;
push(mask_scratch);
// modified. It may be the "smi 1 constant" register.
Register GetSmiConstant(Smi* value);
+ intptr_t RootRegisterDelta(ExternalReference other);
+
// Moves the smi value to the destination register.
void LoadSmiConstant(Register dst, Smi* value);
TEST(ReleaseOverReservedPages) {
i::FLAG_trace_gc = true;
+ // The optimizer can allocate stuff, messing up the test.
+ i::FLAG_crankshaft = false;
+ i::FLAG_always_opt = false;
InitializeVM();
v8::HandleScope scope;
static const int number_of_test_pages = 20;