__ j(not_zero, &loop);
} else {
__ sub(Operand(esp), Immediate(slots * kPointerSize));
+#ifdef _MSC_VER
+ // On windows, you may not access the stack more than one page below
+ // the most recently mapped page. To make the allocated area randomly
+ // accessible, we write to each page in turn (the value is irrelevant).
+ const int kPageSize = 4 * KB;
+ for (int offset = slots * kPointerSize - kPageSize;
+ offset > 0;
+ offset -= kPageSize) {
+ __ mov(Operand(esp, offset), eax);
+ }
+#endif
}
}
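The _MSC_VER block above is a stack probe: Windows only commits stack guard pages one at a time, so after a large sub from esp each 4 KB page must be touched before the area can be accessed at arbitrary offsets. A minimal standalone sketch of the same technique in plain C++ follows; the names ProbeStackArea and kProbePageSize are illustrative and not part of the patch.

#include <cstddef>
#include <cstdint>

// Illustrative only: touch every page of a freshly reserved stack area so the
// OS maps it before random-offset accesses happen. Mirrors the assembler loop
// above; kProbePageSize is an assumed match for the 4 KB page size used there.
static const ptrdiff_t kProbePageSize = 4 * 1024;

inline void ProbeStackArea(volatile uint8_t* base, ptrdiff_t size) {
  // Walk from the far end back toward the current page, one write per page.
  for (ptrdiff_t offset = size - kProbePageSize;
       offset > 0;
       offset -= kProbePageSize) {
    base[offset] = 0;  // The written value is irrelevant, as in the patch.
  }
}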
bool HasAllocatedSpillOperand() const {
return spill_operand_ != NULL && !spill_operand_->IsUnallocated();
}
+
LOperand* GetSpillOperand() const { return spill_operand_; }
void SetSpillOperand(LOperand* operand) {
ASSERT(!operand->IsUnallocated());
bool Covers(LifetimePosition position);
LifetimePosition FirstIntersection(LiveRange* other);
-
// Add a new interval or a new use position to this live range.
void EnsureInterval(LifetimePosition start, LifetimePosition end);
void AddUseInterval(LifetimePosition start, LifetimePosition end);
reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddead));
const Address kFromSpaceZapValue =
reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdad));
-const uint64_t kDebugZapValue = 0xbadbaddbbadbaddb;
+const uint64_t kDebugZapValue = V8_UINT64_C(0xbadbaddbbadbaddb);
+const uint64_t kSlotsZapValue = V8_UINT64_C(0xbeefdeadbeefdeed);
#else
const Address kZapValue = reinterpret_cast<Address>(0xdeadbeed);
const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddead);
bool LCodeGen::GeneratePrologue() {
- Abort("Unimplemented: %s", "GeneratePrologue");
- return false;
+ ASSERT(is_generating());
+
+#ifdef DEBUG
+ if (strlen(FLAG_stop_at) > 0 &&
+ info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ __ int3();
+ }
+#endif
+
+ __ push(rbp); // Caller's frame pointer.
+ __ movq(rbp, rsp);
+ __ push(rsi); // Callee's context.
+ __ push(rdi); // Callee's JS function.
+
+ // Reserve space for the stack slots needed by the code.
+ int slots = StackSlotCount();
+ if (slots > 0) {
+ if (FLAG_debug_code) {
+ __ movl(rax, Immediate(slots));
+ __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE);
+ Label loop;
+ __ bind(&loop);
+ __ push(kScratchRegister);
+ __ decl(rax);
+ __ j(not_zero, &loop);
+ } else {
+ __ subq(rsp, Immediate(slots * kPointerSize));
+#ifdef _MSC_VER
+ // On windows, you may not access the stack more than one page below
+ // the most recently mapped page. To make the allocated area randomly
+ // accessible, we write to each page in turn (the value is irrelevant).
+ const int kPageSize = 4 * KB;
+ for (int offset = slots * kPointerSize - kPageSize;
+ offset > 0;
+ offset -= kPageSize) {
+ __ movq(Operand(rsp, offset), rax);
+ }
+#endif
+ }
+ }
+
+ // Trace the call.
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+ return !is_aborted();
}
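After this prologue the frame holds, going down from rbp: the saved rbp at [rbp], the context (rsi) at [rbp - 8], the JS function (rdi) at [rbp - 16], and then StackSlotCount() spill slots. A rough sketch of the resulting slot addressing, assuming exactly those two fixed pushes below rbp; the helper name and formula are illustrative, not taken from the patch.

// Illustrative sketch: rbp-relative byte offset of spill slot |index|
// (0-based), assuming the prologue above: push rbp; movq rbp, rsp;
// push rsi (context); push rdi (function); then the spill area.
static const int kPointerSize = 8;  // x64

inline int SpillSlotOffsetFromFramePointer(int index) {
  // Skip the two fixed slots (context, function) that sit between rbp and
  // the first spill slot, then index into the spill area.
  return -(index + 3) * kPointerSize;  // slot 0 -> rbp - 24, slot 1 -> rbp - 32, ...
}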
int LChunk::GetNextSpillIndex(bool is_double) {
- // Need to consider what index means: Is it 32 bit or 64 bit index?
- UNIMPLEMENTED();
- return 0;
+ return spill_slot_count_++;
}
LOperand* LChunk::GetNextSpillSlot(bool is_double) {
- UNIMPLEMENTED();
- return NULL;
+ // All stack slots are Double stack slots on x64.
+ // Alternatively, at some point, start using half-size
+ // stack slots for int32 values.
+ int index = GetNextSpillIndex(is_double);
+ if (is_double) {
+ return LDoubleStackSlot::Create(index);
+ } else {
+ return LStackSlot::Create(index);
+ }
}
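Because every slot is allocated at double width here, GetNextSpillIndex can ignore is_double entirely; only the kind of the returned operand differs. A hedged sketch of how a caller might use this pair when spilling a live range; the call site below is hypothetical and not the allocator's actual code.

// Hypothetical call site: when a live range must live in memory, ask the
// chunk for a fresh slot of the matching kind and attach it to the range.
void AssignSpillSlot(LChunk* chunk, LiveRange* range, bool is_double) {
  LOperand* slot = chunk->GetNextSpillSlot(is_double);
  range->SetSpillOperand(slot);  // Declared in the LiveRange fragment above.
}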
return NULL;
}
+
void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
ASSERT(is_building());
current_block_ = block;