// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-#include "lithium-allocator-inl.h"
-
-#include "hydrogen.h"
-#include "string-stream.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/lithium-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/lithium-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/lithium-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/lithium-mips.h"
-#else
-#error "Unknown architecture."
-#endif
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/hydrogen.h"
+#include "src/lithium-inl.h"
+#include "src/lithium-allocator-inl.h"
+#include "src/string-stream.h"
namespace v8 {
namespace internal {
register_beneficial_(true) {
if (operand_ != NULL && operand_->IsUnallocated()) {
LUnallocated* unalloc = LUnallocated::cast(operand_);
- requires_reg_ = unalloc->HasRegisterPolicy();
+ requires_reg_ = unalloc->HasRegisterPolicy() ||
+ unalloc->HasDoubleRegisterPolicy();
register_beneficial_ = !unalloc->HasAnyPolicy();
}
- ASSERT(pos_.IsValid());
+ DCHECK(pos_.IsValid());
}
void UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
- ASSERT(Contains(pos) && pos.Value() != start().Value());
+ DCHECK(Contains(pos) && pos.Value() != start().Value());
UseInterval* after = new(zone) UseInterval(pos, end_);
after->next_ = next_;
next_ = after;
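// The interval is split into [start, pos) and [pos, end), with the new
// |after| interval linked in directly behind the original one.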
void LiveRange::Verify() const {
UsePosition* cur = first_pos_;
while (cur != NULL) {
- ASSERT(Start().Value() <= cur->pos().Value() &&
+ DCHECK(Start().Value() <= cur->pos().Value() &&
cur->pos().Value() <= End().Value());
cur = cur->next();
}
current_interval_(NULL),
last_processed_use_(NULL),
current_hint_operand_(NULL),
- spill_operand_(new(zone) LOperand()),
- spill_start_index_(kMaxInt) { }
+ spill_operand_(new (zone) LOperand()),
+ spill_start_index_(kMaxInt) {}
void LiveRange::set_assigned_register(int reg, Zone* zone) {
- ASSERT(!HasRegisterAssigned() && !IsSpilled());
+ DCHECK(!HasRegisterAssigned() && !IsSpilled());
assigned_register_ = reg;
ConvertOperands(zone);
}
void LiveRange::MakeSpilled(Zone* zone) {
- ASSERT(!IsSpilled());
- ASSERT(TopLevel()->HasAllocatedSpillOperand());
+ DCHECK(!IsSpilled());
+ DCHECK(TopLevel()->HasAllocatedSpillOperand());
spilled_ = true;
assigned_register_ = kInvalidAssignment;
ConvertOperands(zone);
bool LiveRange::HasAllocatedSpillOperand() const {
- ASSERT(spill_operand_ != NULL);
+ DCHECK(spill_operand_ != NULL);
return !spill_operand_->IsIgnored();
}
void LiveRange::SetSpillOperand(LOperand* operand) {
- ASSERT(!operand->IsUnallocated());
- ASSERT(spill_operand_ != NULL);
- ASSERT(spill_operand_->IsIgnored());
+ DCHECK(!operand->IsUnallocated());
+ DCHECK(spill_operand_ != NULL);
+ DCHECK(spill_operand_->IsIgnored());
spill_operand_->ConvertTo(operand->kind(), operand->index());
}
LOperand* LiveRange::CreateAssignedOperand(Zone* zone) {
LOperand* op = NULL;
if (HasRegisterAssigned()) {
- ASSERT(!IsSpilled());
+ DCHECK(!IsSpilled());
switch (Kind()) {
case GENERAL_REGISTERS:
op = LRegister::Create(assigned_register(), zone);
case FLOAT32x4_REGISTERS:
op = LFloat32x4Register::Create(assigned_register(), zone);
break;
+ case FLOAT64x2_REGISTERS:
+ op = LFloat64x2Register::Create(assigned_register(), zone);
+ break;
case INT32x4_REGISTERS:
op = LInt32x4Register::Create(assigned_register(), zone);
break;
UNREACHABLE();
}
} else if (IsSpilled()) {
- ASSERT(!HasRegisterAssigned());
+ DCHECK(!HasRegisterAssigned());
op = TopLevel()->GetSpillOperand();
- ASSERT(!op->IsUnallocated());
+ DCHECK(!op->IsUnallocated());
} else {
LUnallocated* unalloc = new(zone) LUnallocated(LUnallocated::NONE);
unalloc->set_virtual_register(id_);
void LiveRange::SplitAt(LifetimePosition position,
LiveRange* result,
Zone* zone) {
- ASSERT(Start().Value() < position.Value());
- ASSERT(result->IsEmpty());
+ DCHECK(Start().Value() < position.Value());
+ DCHECK(result->IsEmpty());
// Find the last interval that ends before the position. If the
// position is contained in one of the intervals in the chain, we
// split that interval and use the first part.
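// For example, splitting a range with intervals [0,10) [16,24) at position 20
// keeps [0,10) [16,20) in this range and moves [20,24) into |result|.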
void LiveRange::ShortenTo(LifetimePosition start) {
LAllocator::TraceAlloc("Shorten live range %d to [%d\n", id_, start.Value());
- ASSERT(first_interval_ != NULL);
- ASSERT(first_interval_->start().Value() <= start.Value());
- ASSERT(start.Value() < first_interval_->end().Value());
+ DCHECK(first_interval_ != NULL);
+ DCHECK(first_interval_->start().Value() <= start.Value());
+ DCHECK(start.Value() < first_interval_->end().Value());
first_interval_->set_start(start);
}
// The order of instruction processing (see ProcessInstructions) guarantees
// that each new use interval either precedes or intersects with the
// last added interval.
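// For example, if the first interval is currently [10,20), adding [6,12)
// simply widens it to [6,20) instead of creating a new interval.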
- ASSERT(start.Value() < first_interval_->end().Value());
+ DCHECK(start.Value() < first_interval_->end().Value());
first_interval_->start_ = Min(start, first_interval_->start_);
first_interval_->end_ = Max(end, first_interval_->end_);
}
LOperand* op = CreateAssignedOperand(zone);
UsePosition* use_pos = first_pos();
while (use_pos != NULL) {
- ASSERT(Start().Value() <= use_pos->pos().Value() &&
+ DCHECK(Start().Value() <= use_pos->pos().Value() &&
use_pos->pos().Value() <= End().Value());
if (use_pos->HasOperand()) {
- ASSERT(op->IsRegister() || op->IsDoubleRegister() ||
+ DCHECK(op->IsRegister() || op->IsDoubleRegister() ||
op->IsSIMD128Register() || !use_pos->RequiresRegister());
use_pos->operand()->ConvertTo(op->kind(), op->index());
}
for (UseInterval* interval = start_search;
interval != NULL;
interval = interval->next()) {
- ASSERT(interval->next() == NULL ||
+ DCHECK(interval->next() == NULL ||
interval->next()->start().Value() >= interval->start().Value());
AdvanceLastProcessedMarker(interval, position);
if (interval->Contains(position)) return true;
num_registers_(-1),
graph_(graph),
has_osr_entry_(false),
- allocation_ok_(true) { }
+ allocation_ok_(true) {}
void LAllocator::InitializeLivenessAnalysis() {
int pos,
bool is_tagged) {
TraceAlloc("Allocating fixed reg for op %d\n", operand->virtual_register());
- ASSERT(operand->HasFixedPolicy());
+ DCHECK(operand->HasFixedPolicy());
if (operand->HasFixedSlotPolicy()) {
operand->ConvertTo(LOperand::STACK_SLOT, operand->fixed_slot_index());
} else if (operand->HasFixedRegisterPolicy()) {
LiveRange* LAllocator::FixedLiveRangeFor(int index) {
- ASSERT(index < Register::kMaxNumAllocatableRegisters);
+ DCHECK(index < Register::kMaxNumAllocatableRegisters);
LiveRange* result = fixed_live_ranges_[index];
if (result == NULL) {
result = new(zone()) LiveRange(FixedLiveRangeID(index), chunk()->zone());
- ASSERT(result->IsFixed());
+ DCHECK(result->IsFixed());
result->kind_ = GENERAL_REGISTERS;
SetLiveRangeAssignedRegister(result, index);
fixed_live_ranges_[index] = result;
LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
- ASSERT(index < DoubleRegister::NumAllocatableRegisters());
+ DCHECK(index < DoubleRegister::NumAllocatableRegisters());
LiveRange* result = fixed_double_live_ranges_[index];
if (result == NULL) {
result = new(zone()) LiveRange(FixedDoubleLiveRangeID(index),
chunk()->zone());
- ASSERT(result->IsFixed());
+ DCHECK(result->IsFixed());
result->kind_ = DOUBLE_REGISTERS;
SetLiveRangeAssignedRegister(result, index);
fixed_double_live_ranges_[index] = result;
} else if (cur_input->HasWritableRegisterPolicy()) {
// The live range of writable input registers always goes until the end
// of the instruction.
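// Extending the range past the use prevents the output (or another operand)
// from being allocated to a register the instruction may still write to.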
- ASSERT(!cur_input->IsUsedAtStart());
+ DCHECK(!cur_input->IsUsedAtStart());
LUnallocated* input_copy = cur_input->CopyUnconstrained(
chunk()->zone());
cur_input->virtual_register() - first_artificial_register_,
zone());
} else if (RequiredRegisterKind(input_copy->virtual_register()) ==
+ FLOAT64x2_REGISTERS) {
+ float64x2_artificial_registers_.Add(
+ cur_input->virtual_register() - first_artificial_register_,
+ zone());
+ } else if (RequiredRegisterKind(input_copy->virtual_register()) ==
INT32x4_REGISTERS) {
int32x4_artificial_registers_.Add(
cur_input->virtual_register() - first_artificial_register_,
}
}
} else {
- ASSERT(!IsGapAt(index));
+ DCHECK(!IsGapAt(index));
LInstruction* instr = InstructionAt(index);
if (instr != NULL) {
}
}
- if (instr->ClobbersDoubleRegisters()) {
+ if (instr->ClobbersDoubleRegisters(isolate())) {
for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
if (output == NULL || !output->IsDoubleRegister() ||
output->index() != i) {
}
Use(block_start_position, curr_position.InstructionEnd(), temp, NULL);
Define(curr_position, temp, NULL);
+
+ if (temp->IsUnallocated()) {
+ LUnallocated* temp_unalloc = LUnallocated::cast(temp);
+ if (temp_unalloc->HasDoubleRegisterPolicy()) {
+ double_artificial_registers_.Add(
+ temp_unalloc->virtual_register() - first_artificial_register_,
+ zone());
+ }
+ }
}
}
}
for (int i = 0; i < phis->length(); ++i) {
HPhi* phi = phis->at(i);
LUnallocated* phi_operand =
- new(chunk()->zone()) LUnallocated(LUnallocated::NONE);
+ new (chunk()->zone()) LUnallocated(LUnallocated::NONE);
phi_operand->set_virtual_register(phi->id());
for (int j = 0; j < phi->OperandCount(); ++j) {
HValue* op = phi->OperandAt(j);
HConstant* constant = HConstant::cast(op);
operand = chunk_->DefineConstantOperand(constant);
} else {
- ASSERT(!op->EmitAtUses());
+ DCHECK(!op->EmitAtUses());
LUnallocated* unalloc =
new(chunk()->zone()) LUnallocated(LUnallocated::ANY);
unalloc->set_virtual_register(op->id());
bool LAllocator::Allocate(LChunk* chunk) {
- ASSERT(chunk_ == NULL);
+ DCHECK(chunk_ == NULL);
chunk_ = static_cast<LPlatformChunk*>(chunk);
assigned_registers_ =
new(chunk->zone()) BitVector(Register::NumAllocatableRegisters(),
void LAllocator::MeetRegisterConstraints() {
LAllocatorPhase phase("L_Register constraints", this);
- first_artificial_register_ = next_virtual_register_;
const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
for (int i = 0; i < blocks->length(); ++i) {
HBasicBlock* block = blocks->at(i);
LiveRange* cur_range = range;
while (cur_range != NULL && (cur_cover == NULL || pred_cover == NULL)) {
if (cur_range->CanCover(cur_start)) {
- ASSERT(cur_cover == NULL);
+ DCHECK(cur_cover == NULL);
cur_cover = cur_range;
}
if (cur_range->CanCover(pred_end)) {
- ASSERT(pred_cover == NULL);
+ DCHECK(pred_cover == NULL);
pred_cover = cur_range;
}
cur_range = cur_range->next();
}
if (cur_cover->IsSpilled()) return;
- ASSERT(pred_cover != NULL && cur_cover != NULL);
+ DCHECK(pred_cover != NULL && cur_cover != NULL);
if (pred_cover != cur_cover) {
LOperand* pred_op = pred_cover->CreateAssignedOperand(chunk()->zone());
LOperand* cur_op = cur_cover->CreateAssignedOperand(chunk()->zone());
if (block->predecessors()->length() == 1) {
gap = GapAt(block->first_instruction_index());
} else {
- ASSERT(pred->end()->SecondSuccessor() == NULL);
+ DCHECK(pred->end()->SecondSuccessor() == NULL);
gap = GetLastGap(pred);
// We are going to insert a move before the branch instruction.
break;
}
}
- ASSERT(hint != NULL);
+ DCHECK(hint != NULL);
LifetimePosition block_start = LifetimePosition::FromInstructionIndex(
block->first_instruction_index());
CodeStub::Major major_key = chunk_->info()->code_stub()->MajorKey();
PrintF("Function: %s\n", CodeStub::MajorName(major_key, false));
} else {
- ASSERT(chunk_->info()->IsOptimizing());
+ DCHECK(chunk_->info()->IsOptimizing());
AllowHandleDereference allow_deref;
PrintF("Function: %s\n",
chunk_->info()->function()->debug_name()->ToCString().get());
PrintF("First use is at %d\n", range->first_pos()->pos().Value());
iterator.Advance();
}
- ASSERT(!found);
+ DCHECK(!found);
}
#endif
}
LAllocatorPhase phase("L_Populate pointer maps", this);
const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps();
- ASSERT(SafePointsAreInOrder());
+ DCHECK(SafePointsAreInOrder());
// Iterate over all safe point positions and record a pointer
// for all spilled live ranges at this point.
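// The recorded operands let the GC locate and update tagged values that are
// held in spill slots while execution is stopped at the safe point.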
for (LiveRange* cur = range; cur != NULL; cur = cur->next()) {
LifetimePosition this_end = cur->End();
if (this_end.InstructionIndex() > end) end = this_end.InstructionIndex();
- ASSERT(cur->Start().InstructionIndex() >= start);
+ DCHECK(cur->Start().InstructionIndex() >= start);
}
// Most of the ranges are in order, but not all. Keep an eye on when
// they step out of order.
"at safe point %d\n",
cur->id(), cur->Start().Value(), safe_point);
LOperand* operand = cur->CreateAssignedOperand(chunk()->zone());
- ASSERT(!operand->IsStackSlot());
+ DCHECK(!operand->IsStackSlot());
map->RecordPointer(operand, chunk()->zone());
}
}
void LAllocator::AllocateRegisters() {
- ASSERT(unhandled_live_ranges_.is_empty());
+ DCHECK(unhandled_live_ranges_.is_empty());
for (int i = 0; i < live_ranges_.length(); ++i) {
if (live_ranges_[i] != NULL) {
}
}
SortUnhandled();
- ASSERT(UnhandledIsSorted());
+ DCHECK(UnhandledIsSorted());
- ASSERT(reusable_slots_.is_empty());
- ASSERT(reusable_simd128_slots_.is_empty());
- ASSERT(active_live_ranges_.is_empty());
- ASSERT(inactive_live_ranges_.is_empty());
+ DCHECK(reusable_slots_.is_empty());
+ DCHECK(reusable_simd128_slots_.is_empty());
+ DCHECK(active_live_ranges_.is_empty());
+ DCHECK(inactive_live_ranges_.is_empty());
if (mode_ == DOUBLE_REGISTERS) {
for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
}
}
} else {
- ASSERT(mode_ == GENERAL_REGISTERS);
+ DCHECK(mode_ == GENERAL_REGISTERS);
for (int i = 0; i < fixed_live_ranges_.length(); ++i) {
LiveRange* current = fixed_live_ranges_.at(i);
if (current != NULL) {
}
while (!unhandled_live_ranges_.is_empty()) {
- ASSERT(UnhandledIsSorted());
+ DCHECK(UnhandledIsSorted());
LiveRange* current = unhandled_live_ranges_.RemoveLast();
- ASSERT(UnhandledIsSorted());
+ DCHECK(UnhandledIsSorted());
LifetimePosition position = current->Start();
#ifdef DEBUG
allocation_finger_ = position;
// the register is too close to the start of live range.
SpillBetween(current, current->Start(), pos->pos());
if (!AllocationOk()) return;
- ASSERT(UnhandledIsSorted());
+ DCHECK(UnhandledIsSorted());
continue;
}
}
}
}
- ASSERT(!current->HasRegisterAssigned() && !current->IsSpilled());
+ DCHECK(!current->HasRegisterAssigned() && !current->IsSpilled());
bool result = TryAllocateFreeReg(current);
if (!AllocationOk()) return;
if (FLAG_trace_alloc) {
va_list arguments;
va_start(arguments, msg);
- OS::VPrint(msg, arguments);
+ base::OS::VPrint(msg, arguments);
va_end(arguments);
}
}
return DOUBLE_REGISTERS;
} else if (value != NULL && (value->representation().IsFloat32x4())) {
return FLOAT32x4_REGISTERS;
+ } else if (value != NULL && (value->representation().IsFloat64x2())) {
+ return FLOAT64x2_REGISTERS;
} else if (value != NULL && (value->representation().IsInt32x4())) {
return INT32x4_REGISTERS;
}
} else if (float32x4_artificial_registers_.Contains(
virtual_register - first_artificial_register_)) {
return FLOAT32x4_REGISTERS;
+ } else if (float64x2_artificial_registers_.Contains(
+ virtual_register - first_artificial_register_)) {
+ return FLOAT64x2_REGISTERS;
} else if (int32x4_artificial_registers_.Contains(
virtual_register - first_artificial_register_)) {
return INT32x4_REGISTERS;
void LAllocator::AddToUnhandledSorted(LiveRange* range) {
if (range == NULL || range->IsEmpty()) return;
- ASSERT(!range->HasRegisterAssigned() && !range->IsSpilled());
- ASSERT(allocation_finger_.Value() <= range->Start().Value());
+ DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
+ DCHECK(allocation_finger_.Value() <= range->Start().Value());
for (int i = unhandled_live_ranges_.length() - 1; i >= 0; --i) {
LiveRange* cur_range = unhandled_live_ranges_.at(i);
if (range->ShouldBeAllocatedBefore(cur_range)) {
TraceAlloc("Add live range %d to unhandled at %d\n", range->id(), i + 1);
unhandled_live_ranges_.InsertAt(i + 1, range, zone());
- ASSERT(UnhandledIsSorted());
+ DCHECK(UnhandledIsSorted());
return;
}
}
TraceAlloc("Add live range %d to unhandled at start\n", range->id());
unhandled_live_ranges_.InsertAt(0, range, zone());
- ASSERT(UnhandledIsSorted());
+ DCHECK(UnhandledIsSorted());
}
void LAllocator::AddToUnhandledUnsorted(LiveRange* range) {
if (range == NULL || range->IsEmpty()) return;
- ASSERT(!range->HasRegisterAssigned() && !range->IsSpilled());
+ DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
TraceAlloc("Add live range %d to unhandled unsorted at end\n", range->id());
unhandled_live_ranges_.Add(range, zone());
}
static int UnhandledSortHelper(LiveRange* const* a, LiveRange* const* b) {
- ASSERT(!(*a)->ShouldBeAllocatedBefore(*b) ||
+ DCHECK(!(*a)->ShouldBeAllocatedBefore(*b) ||
!(*b)->ShouldBeAllocatedBefore(*a));
if ((*a)->ShouldBeAllocatedBefore(*b)) return 1;
if ((*b)->ShouldBeAllocatedBefore(*a)) return -1;
void LAllocator::ActiveToHandled(LiveRange* range) {
- ASSERT(active_live_ranges_.Contains(range));
+ DCHECK(active_live_ranges_.Contains(range));
active_live_ranges_.RemoveElement(range);
TraceAlloc("Moving live range %d from active to handled\n", range->id());
FreeSpillSlot(range);
void LAllocator::ActiveToInactive(LiveRange* range) {
- ASSERT(active_live_ranges_.Contains(range));
+ DCHECK(active_live_ranges_.Contains(range));
active_live_ranges_.RemoveElement(range);
inactive_live_ranges_.Add(range, zone());
TraceAlloc("Moving live range %d from active to inactive\n", range->id());
void LAllocator::InactiveToHandled(LiveRange* range) {
- ASSERT(inactive_live_ranges_.Contains(range));
+ DCHECK(inactive_live_ranges_.Contains(range));
inactive_live_ranges_.RemoveElement(range);
TraceAlloc("Moving live range %d from inactive to handled\n", range->id());
FreeSpillSlot(range);
void LAllocator::InactiveToActive(LiveRange* range) {
- ASSERT(inactive_live_ranges_.Contains(range));
+ DCHECK(inactive_live_ranges_.Contains(range));
inactive_live_ranges_.RemoveElement(range);
active_live_ranges_.Add(range, zone());
TraceAlloc("Moving live range %d from inactive to active\n", range->id());
for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
LiveRange* cur_inactive = inactive_live_ranges_.at(i);
- ASSERT(cur_inactive->End().Value() > current->Start().Value());
+ DCHECK(cur_inactive->End().Value() > current->Start().Value());
LifetimePosition next_intersection =
cur_inactive->FirstIntersection(current);
if (!next_intersection.IsValid()) continue;
// Register reg is available at the range start and is free until
// the range end.
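// The whole of the current range fits into this register, so it can be
// assigned without any further splitting.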
- ASSERT(pos.Value() >= current->End().Value());
+ DCHECK(pos.Value() >= current->End().Value());
TraceAlloc("Assigning free reg %s to live range %d\n",
RegisterName(reg),
current->id());
for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
LiveRange* range = inactive_live_ranges_.at(i);
- ASSERT(range->End().Value() > current->Start().Value());
+ DCHECK(range->End().Value() > current->Start().Value());
LifetimePosition next_intersection = range->FirstIntersection(current);
if (!next_intersection.IsValid()) continue;
int cur_reg = range->assigned_register();
}
// Register reg is not blocked for the whole range.
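// Assign it anyway; intersecting parts of active and inactive ranges are
// split off and spilled by SplitAndSpillIntersecting below.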
- ASSERT(block_pos[reg].Value() >= current->End().Value());
+ DCHECK(block_pos[reg].Value() >= current->End().Value());
TraceAlloc("Assigning blocked reg %s to live range %d\n",
RegisterName(reg),
current->id());
void LAllocator::SplitAndSpillIntersecting(LiveRange* current) {
- ASSERT(current->HasRegisterAssigned());
+ DCHECK(current->HasRegisterAssigned());
int reg = current->assigned_register();
LifetimePosition split_pos = current->Start();
for (int i = 0; i < active_live_ranges_.length(); ++i) {
for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
LiveRange* range = inactive_live_ranges_[i];
- ASSERT(range->End().Value() > current->Start().Value());
+ DCHECK(range->End().Value() > current->Start().Value());
if (range->assigned_register() == reg && !range->IsFixed()) {
LifetimePosition next_intersection = range->FirstIntersection(current);
if (next_intersection.IsValid()) {
LiveRange* LAllocator::SplitRangeAt(LiveRange* range, LifetimePosition pos) {
- ASSERT(!range->IsFixed());
+ DCHECK(!range->IsFixed());
TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());
if (pos.Value() <= range->Start().Value()) return range;
// We can't properly connect live ranges if the split occurred at the end
// of a control instruction.
- ASSERT(pos.IsInstructionStart() ||
+ DCHECK(pos.IsInstructionStart() ||
!chunk_->instructions()->at(pos.InstructionIndex())->IsControl());
int vreg = GetVirtualRegister();
LiveRange* LAllocator::SplitBetween(LiveRange* range,
LifetimePosition start,
LifetimePosition end) {
- ASSERT(!range->IsFixed());
+ DCHECK(!range->IsFixed());
TraceAlloc("Splitting live range %d in position between [%d, %d]\n",
range->id(),
start.Value(),
end.Value());
LifetimePosition split_pos = FindOptimalSplitPos(start, end);
- ASSERT(split_pos.Value() >= start.Value());
+ DCHECK(split_pos.Value() >= start.Value());
return SplitRangeAt(range, split_pos);
}
LifetimePosition end) {
int start_instr = start.InstructionIndex();
int end_instr = end.InstructionIndex();
- ASSERT(start_instr <= end_instr);
+ DCHECK(start_instr <= end_instr);
// We have no choice
if (start_instr == end_instr) return end;
end.PrevInstruction().InstructionEnd());
if (!AllocationOk()) return;
- ASSERT(third_part != second_part);
+ DCHECK(third_part != second_part);
Spill(second_part);
AddToUnhandledSorted(third_part);
void LAllocator::Spill(LiveRange* range) {
- ASSERT(!range->IsSpilled());
+ DCHECK(!range->IsSpilled());
TraceAlloc("Spilling live range %d\n", range->id());
LiveRange* first = range->TopLevel();
op->kind() != LOperand::FLOAT32x4_STACK_SLOT) {
// Convert to Float32x4StackSlot.
op = LFloat32x4StackSlot::Create(op->index(), zone());
+ } else if (range->Kind() == FLOAT64x2_REGISTERS &&
+ op->kind() != LOperand::FLOAT64x2_STACK_SLOT) {
+ // Convert to Float64x2StackSlot.
+ op = LFloat64x2StackSlot::Create(op->index(), zone());
} else if (range->Kind() == INT32x4_REGISTERS &&
op->kind() != LOperand::INT32x4_STACK_SLOT) {
// Convert to Int32x4StackSlot.
if (FLAG_hydrogen_stats) {
unsigned size = allocator_->zone()->allocation_size() -
allocator_zone_start_allocation_size_;
- isolate()->GetHStatistics()->SaveTiming(name(), TimeDelta(), size);
+ isolate()->GetHStatistics()->SaveTiming(name(), base::TimeDelta(), size);
}
if (ShouldProduceTraceOutput()) {