LOperand* op = NULL;
if (HasRegisterAssigned()) {
ASSERT(!IsSpilled());
- if (assigned_double_) {
+ if (IsDouble()) {
op = LDoubleRegister::Create(assigned_register());
} else {
op = LRegister::Create(assigned_register());
void LiveRange::SplitAt(LifetimePosition position, LiveRange* result) {
- ASSERT(Start().Value() <= position.Value());
+ ASSERT(Start().Value() < position.Value());
ASSERT(result->IsEmpty());
// Find the last interval that ends before the position. If the
// position is contained in one of the intervals in the chain, we
if (result == NULL) {
result = new LiveRange(FixedLiveRangeID(index));
ASSERT(result->IsFixed());
- result->set_assigned_register(index, false);
+ result->set_assigned_register(index, GENERAL_REGISTERS);
fixed_live_ranges_[index] = result;
}
return result;
if (result == NULL) {
result = new LiveRange(FixedDoubleLiveRangeID(index));
ASSERT(result->IsFixed());
- result->set_assigned_register(index, true);
+ result->set_assigned_register(index, DOUBLE_REGISTERS);
fixed_double_live_ranges_[index] = result;
}
return result;
}
-void LAllocator::AllocateGeneralRegisters() {
- HPhase phase("Allocate general registers", this);
- num_registers_ = Register::kNumAllocatableRegisters;
- mode_ = CPU_REGISTERS;
- AllocateRegisters();
-}
-
-
bool LAllocator::SafePointsAreInOrder() const {
const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps();
int safe_point = 0;
}
+// Runs the linear-scan allocation pass restricted to the general-purpose
+// (CPU) register bank: configures the register count and mode_, then
+// delegates to the mode-agnostic AllocateRegisters().
+void LAllocator::AllocateGeneralRegisters() {
+ HPhase phase("Allocate general registers", this);
+ num_registers_ = Register::kNumAllocatableRegisters;
+ mode_ = GENERAL_REGISTERS;
+ AllocateRegisters();
+}
+
+
+// Runs the linear-scan allocation pass restricted to the floating-point
+// (double) register bank; mirrors AllocateGeneralRegisters().
void LAllocator::AllocateDoubleRegisters() {
HPhase phase("Allocate double registers", this);
num_registers_ = DoubleRegister::kNumAllocatableRegisters;
- mode_ = XMM_REGISTERS;
+ mode_ = DOUBLE_REGISTERS;
AllocateRegisters();
}
for (int i = 0; i < live_ranges_.length(); ++i) {
if (live_ranges_[i] != NULL) {
- if (HasDoubleValue(live_ranges_[i]->id()) == (mode_ == XMM_REGISTERS)) {
+ if (RequiredRegisterKind(live_ranges_[i]->id()) == mode_) {
AddToUnhandledUnsorted(live_ranges_[i]);
}
}
ASSERT(active_live_ranges_.is_empty());
ASSERT(inactive_live_ranges_.is_empty());
- if (mode_ == XMM_REGISTERS) {
+ if (mode_ == DOUBLE_REGISTERS) {
for (int i = 0; i < fixed_double_live_ranges_.length(); ++i) {
LiveRange* current = fixed_double_live_ranges_.at(i);
if (current != NULL) {
current->Start().NextInstruction().Value()) {
// Do not spill live range eagerly if use position that can benefit from
// the register is too close to the start of live range.
- LiveRange* part = Split(current,
- current->Start().NextInstruction(),
- pos->pos());
- Spill(current);
- AddToUnhandledSorted(part);
+ SpillBetween(current, current->Start(), pos->pos());
ASSERT(UnhandledIsSorted());
continue;
}
}
+// Returns a printable name for the given allocation index, interpreted
+// according to the current allocation mode_ (general vs. double register
+// bank). Used by TraceAlloc output; requires an active mode (not NONE).
+const char* LAllocator::RegisterName(int allocation_index) {
+ ASSERT(mode_ != NONE);
+ if (mode_ == GENERAL_REGISTERS) {
+ return Register::AllocationIndexToString(allocation_index);
+ } else {
+ return DoubleRegister::AllocationIndexToString(allocation_index);
+ }
+}
+
+
void LAllocator::TraceAlloc(const char* msg, ...) {
if (FLAG_trace_alloc) {
va_list arguments;
}
+// Maps a virtual register to the kind of hardware register its value
+// requires: values with double representation need DOUBLE_REGISTERS;
+// everything else — including virtual registers with no associated
+// HValue — defaults to GENERAL_REGISTERS.
-bool LAllocator::HasDoubleValue(int virtual_register) const {
+RegisterKind LAllocator::RequiredRegisterKind(int virtual_register) const {
HValue* value = graph()->LookupValue(virtual_register);
- if (value == NULL) return false;
- return value->representation().IsDouble();
+ if (value != NULL && value->representation().IsDouble()) {
+ return DOUBLE_REGISTERS;
+ }
+ return GENERAL_REGISTERS;
}
}
+// TryAllocateFreeReg and AllocateBlockedReg assume this
+// when allocating local arrays.
+STATIC_ASSERT(DoubleRegister::kNumAllocatableRegisters >=
+ Register::kNumAllocatableRegisters);
+
+
bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
- LifetimePosition max_pos = LifetimePosition::FromInstructionIndex(
- chunk_->instructions()->length() + 1);
- ASSERT(DoubleRegister::kNumAllocatableRegisters >=
- Register::kNumAllocatableRegisters);
- EmbeddedVector<LifetimePosition, DoubleRegister::kNumAllocatableRegisters>
- free_pos(max_pos);
+ LifetimePosition free_until_pos[DoubleRegister::kNumAllocatableRegisters];
+
+ for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ free_until_pos[i] = LifetimePosition::MaxPosition();
+ }
+
for (int i = 0; i < active_live_ranges_.length(); ++i) {
LiveRange* cur_active = active_live_ranges_.at(i);
- free_pos[cur_active->assigned_register()] =
+ free_until_pos[cur_active->assigned_register()] =
LifetimePosition::FromInstructionIndex(0);
}
cur_inactive->FirstIntersection(current);
if (!next_intersection.IsValid()) continue;
int cur_reg = cur_inactive->assigned_register();
- free_pos[cur_reg] = Min(free_pos[cur_reg], next_intersection);
+ free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
}
- UsePosition* pos = current->FirstPosWithHint();
- if (pos != NULL) {
- LOperand* hint = pos->hint();
+ UsePosition* hinted_use = current->FirstPosWithHint();
+ if (hinted_use != NULL) {
+ LOperand* hint = hinted_use->hint();
if (hint->IsRegister() || hint->IsDoubleRegister()) {
int register_index = hint->index();
- TraceAlloc("Found reg hint %d for live range %d (free [%d, end %d[)\n",
- register_index,
- current->id(),
- free_pos[register_index].Value(),
- current->End().Value());
- if (free_pos[register_index].Value() >= current->End().Value()) {
- TraceAlloc("Assigning preferred reg %d to live range %d\n",
- register_index,
+ TraceAlloc(
+ "Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
+ RegisterName(register_index),
+ free_until_pos[register_index].Value(),
+ current->id(),
+ current->End().Value());
+
+ // The desired register is free until the end of the current live range.
+ if (free_until_pos[register_index].Value() >= current->End().Value()) {
+ TraceAlloc("Assigning preferred reg %s to live range %d\n",
+ RegisterName(register_index),
current->id());
- current->set_assigned_register(register_index, mode_ == XMM_REGISTERS);
+ current->set_assigned_register(register_index, mode_);
return true;
}
}
}
- int max_reg = 0;
+ // Find the register which stays free for the longest time.
+ int reg = 0;
for (int i = 1; i < RegisterCount(); ++i) {
- if (free_pos[i].Value() > free_pos[max_reg].Value()) {
- max_reg = i;
+ if (free_until_pos[i].Value() > free_until_pos[reg].Value()) {
+ reg = i;
}
}
- if (free_pos[max_reg].InstructionIndex() == 0) {
+ LifetimePosition pos = free_until_pos[reg];
+
+ if (pos.Value() <= current->Start().Value()) {
+ // All registers are blocked.
return false;
- } else if (free_pos[max_reg].Value() >= current->End().Value()) {
- TraceAlloc("Assigning reg %d to live range %d\n", max_reg, current->id());
- current->set_assigned_register(max_reg, mode_ == XMM_REGISTERS);
- } else {
- // Split the interval before first use position of max_reg and never split
- // it interval at its start position.
- LifetimePosition pos = free_pos[max_reg];
- if (pos.Value() <= current->Start().Value()) return false;
- LiveRange* second_range = Split(current, pos);
- AddToUnhandledSorted(second_range);
- current->set_assigned_register(max_reg, mode_ == XMM_REGISTERS);
}
+ if (pos.Value() < current->End().Value()) {
+ // Register reg is available at the range start but becomes blocked before
+ // the range end. Split current at position where it becomes blocked.
+ LiveRange* tail = SplitAt(current, pos);
+ AddToUnhandledSorted(tail);
+ }
+
+
+ // Register reg is available at the range start and is free until
+ // the range end.
+ ASSERT(pos.Value() >= current->End().Value());
+ TraceAlloc("Assigning reg %s to live range %d\n",
+ RegisterName(reg),
+ current->id());
+ current->set_assigned_register(reg, mode_);
+
return true;
}
void LAllocator::AllocateBlockedReg(LiveRange* current) {
- LifetimePosition max_pos =
- LifetimePosition::FromInstructionIndex(
- chunk_->instructions()->length() + 1);
- ASSERT(DoubleRegister::kNumAllocatableRegisters >=
- Register::kNumAllocatableRegisters);
- EmbeddedVector<LifetimePosition, DoubleRegister::kNumAllocatableRegisters>
- use_pos(max_pos);
- EmbeddedVector<LifetimePosition, DoubleRegister::kNumAllocatableRegisters>
- block_pos(max_pos);
+ UsePosition* register_use = current->NextRegisterPosition(current->Start());
+ if (register_use == NULL) {
+ // There is no use in the current live range that requires a register.
+ // We can just spill it.
+ Spill(current);
+ return;
+ }
+
+
+ LifetimePosition use_pos[DoubleRegister::kNumAllocatableRegisters];
+ LifetimePosition block_pos[DoubleRegister::kNumAllocatableRegisters];
+
+ for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
+ }
for (int i = 0; i < active_live_ranges_.length(); ++i) {
LiveRange* range = active_live_ranges_[i];
}
}
- int max_reg = 0;
+ int reg = 0;
for (int i = 1; i < RegisterCount(); ++i) {
- if (use_pos[i].Value() > use_pos[max_reg].Value()) {
- max_reg = i;
+ if (use_pos[i].Value() > use_pos[reg].Value()) {
+ reg = i;
}
}
- UsePosition* first_usage = current->NextRegisterPosition(current->Start());
- if (first_usage == NULL) {
- Spill(current);
- } else if (use_pos[max_reg].Value() < first_usage->pos().Value()) {
- SplitAndSpill(current, current->Start(), first_usage->pos());
- } else {
- if (block_pos[max_reg].Value() < current->End().Value()) {
- // Split current before blocked position.
- LiveRange* second_range = Split(current,
- current->Start(),
- block_pos[max_reg]);
- AddToUnhandledSorted(second_range);
- }
+ LifetimePosition pos = use_pos[reg];
+
+ if (pos.Value() < register_use->pos().Value()) {
+ // All registers are blocked before the first use that requires a register.
+ // Spill starting part of live range up to that use.
+ //
+ // Corner case: the first use position is equal to the start of the range.
+ // In this case we have nothing to spill and SpillBetween will just return
+ // this range to the list of unhandled ones. This will lead to the infinite
+ // loop.
+ ASSERT(current->Start().Value() < register_use->pos().Value());
+ SpillBetween(current, current->Start(), register_use->pos());
+ return;
+ }
- current->set_assigned_register(max_reg, mode_ == XMM_REGISTERS);
- SplitAndSpillIntersecting(current);
+ if (block_pos[reg].Value() < current->End().Value()) {
+ // Register becomes blocked before the current range end. Split before that
+ // position.
+ LiveRange* tail = SplitBetween(current,
+ current->Start(),
+ block_pos[reg].InstructionStart());
+ AddToUnhandledSorted(tail);
}
+
+ // Register reg is not blocked for the whole range.
+ ASSERT(block_pos[reg].Value() >= current->End().Value());
+ TraceAlloc("Assigning reg %s to live range %d\n",
+ RegisterName(reg),
+ current->id());
+ current->set_assigned_register(reg, mode_);
+
+ // This register was not free. Thus we need to find and spill
+ // parts of active and inactive live regions that use the same register
+ // at the same lifetime positions as current.
+ SplitAndSpillIntersecting(current);
}
if (range->assigned_register() == reg) {
UsePosition* next_pos = range->NextRegisterPosition(current->Start());
if (next_pos == NULL) {
- SplitAndSpill(range, split_pos);
+ SpillAfter(range, split_pos);
} else {
- SplitAndSpill(range, split_pos, next_pos->pos());
+ SpillBetween(range, split_pos, next_pos->pos());
}
ActiveToHandled(range);
--i;
if (next_intersection.IsValid()) {
UsePosition* next_pos = range->NextRegisterPosition(current->Start());
if (next_pos == NULL) {
- SplitAndSpill(range, split_pos);
+ SpillAfter(range, split_pos);
} else {
next_intersection = Min(next_intersection, next_pos->pos());
- SplitAndSpill(range, split_pos, next_intersection);
+ SpillBetween(range, split_pos, next_intersection);
}
InactiveToHandled(range);
--i;
}
-LiveRange* LAllocator::Split(LiveRange* range,
- LifetimePosition start,
- LifetimePosition end) {
+// True when pos is the start of an instruction that is a label, i.e. a
+// basic-block boundary in the lithium instruction stream.
+bool LAllocator::IsBlockBoundary(LifetimePosition pos) {
+ return pos.IsInstructionStart() &&
+ chunk_->instructions()->at(pos.InstructionIndex())->IsLabel();
+}
+
+
+// Records a parallel move from prev's operand to next's operand in the
+// START part of the gap at instruction index pos, registering a use
+// position on both live ranges. The source operand is also set as an
+// allocation hint for the destination's use position.
+void LAllocator::AddGapMove(int pos, LiveRange* prev, LiveRange* next) {
+ UsePosition* prev_pos = prev->AddUsePosition(
+ LifetimePosition::FromInstructionIndex(pos));
+ UsePosition* next_pos = next->AddUsePosition(
+ LifetimePosition::FromInstructionIndex(pos));
+ LOperand* prev_operand = prev_pos->operand();
+ LOperand* next_operand = next_pos->operand();
+ LGap* gap = chunk_->GetGapAt(pos);
+ gap->GetOrCreateParallelMove(LGap::START)->
+ AddMove(prev_operand, next_operand);
+ next_pos->set_hint(prev_operand);
+}
+
+
+// Splits range at pos, returning the tail live range (backed by a fresh
+// virtual register). If pos is at or before the range's start there is
+// nothing to split and the original range is returned unchanged.
+// Fixed ranges must never be split.
+LiveRange* LAllocator::SplitAt(LiveRange* range, LifetimePosition pos) {
+ ASSERT(!range->IsFixed());
+ TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());
+
+ if (pos.Value() <= range->Start().Value()) return range;
+
+ LiveRange* result = LiveRangeFor(next_virtual_register_++);
+ range->SplitAt(pos, result);
+ return result;
+}
+
+
+// Splits range at a position chosen from the closed interval [start, end]
+// by FindOptimalSplitPos (preferring an outermost loop header when one is
+// covered), and returns the resulting tail range.
+LiveRange* LAllocator::SplitBetween(LiveRange* range,
+ LifetimePosition start,
+ LifetimePosition end) {
ASSERT(!range->IsFixed());
- TraceAlloc("Splitting live range %d in position between [%d, %d[\n",
+ TraceAlloc("Splitting live range %d in position between [%d, %d]\n",
range->id(),
start.Value(),
end.Value());
- LifetimePosition split_pos = FindOptimalSplitPos(
- start, end.PrevInstruction().InstructionEnd());
+ LifetimePosition split_pos = FindOptimalSplitPos(start, end);
ASSERT(split_pos.Value() >= start.Value());
- return Split(range, split_pos);
+ return SplitAt(range, split_pos);
}
}
HBasicBlock* block = end_block;
- // Move to the most outside loop header.
+ // Find header of outermost loop.
while (block->parent_loop_header() != NULL &&
block->parent_loop_header()->block_id() > start_block->block_id()) {
block = block->parent_loop_header();
}
- if (block == end_block) {
- return end;
- }
+ if (block == end_block) return end;
return LifetimePosition::FromInstructionIndex(
block->first_instruction_index());
}
-bool LAllocator::IsBlockBoundary(LifetimePosition pos) {
- return pos.IsInstructionStart() &&
- chunk_->instructions()->at(pos.InstructionIndex())->IsLabel();
+// Splits range at pos and spills the tail part, leaving the head of the
+// range (everything before pos) untouched.
+void LAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
+ LiveRange* second_part = SplitAt(range, pos);
+ Spill(second_part);
}
-void LAllocator::AddGapMove(int pos, LiveRange* prev, LiveRange* next) {
- UsePosition* prev_pos = prev->AddUsePosition(
- LifetimePosition::FromInstructionIndex(pos));
- UsePosition* next_pos = next->AddUsePosition(
- LifetimePosition::FromInstructionIndex(pos));
- LOperand* prev_operand = prev_pos->operand();
- LOperand* next_operand = next_pos->operand();
- LGap* gap = chunk_->GetGapAt(pos);
- gap->GetOrCreateParallelMove(LGap::START)->
- AddMove(prev_operand, next_operand);
- next_pos->set_hint(prev_operand);
-}
-
+// Spills range over the interval [start, end[: splits off the part after
+// start, and if that part begins before end, splits again so that only the
+// middle portion is spilled while the remainder is returned to the
+// unhandled list for later allocation.
+void LAllocator::SpillBetween(LiveRange* range,
+ LifetimePosition start,
+ LifetimePosition end) {
+ ASSERT(start.Value() < end.Value());
+ LiveRange* second_part = SplitAt(range, start);
-LiveRange* LAllocator::Split(LiveRange* range, LifetimePosition pos) {
- ASSERT(!range->IsFixed());
- TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());
- if (pos.Value() <= range->Start().Value()) {
- return range;
- }
- LiveRange* result = LiveRangeFor(next_virtual_register_++);
- range->SplitAt(pos, result);
- return result;
-}
+ if (second_part->Start().Value() < end.Value()) {
+ // The split result intersects with [start, end[.
+ // Split it at a position between ]start+1, end[, spill the middle part
+ // and put the rest on the unhandled list.
+ LiveRange* third_part = SplitBetween(
+ second_part,
+ second_part->Start().InstructionEnd(),
+ end.PrevInstruction().InstructionEnd());
+ ASSERT(third_part != second_part);
-void LAllocator::SplitAndSpill(LiveRange* range,
- LifetimePosition start,
- LifetimePosition end) {
- // We have an interval range and want to make sure that it is
- // spilled at start and at most spilled until end.
- ASSERT(start.Value() < end.Value());
- LiveRange* tail_part = Split(range, start);
- if (tail_part->Start().Value() < end.Value()) {
- LiveRange* third_part = Split(tail_part,
- tail_part->Start().NextInstruction(),
- end);
- Spill(tail_part);
- ASSERT(third_part != tail_part);
+ Spill(second_part);
AddToUnhandledSorted(third_part);
} else {
- AddToUnhandledSorted(tail_part);
+ // The split result does not intersect with [start, end[.
+ // Nothing to spill. Just put it on the unhandled list as a whole.
+ AddToUnhandledSorted(second_part);
}
}
-void LAllocator::SplitAndSpill(LiveRange* range, LifetimePosition at) {
- LiveRange* second_part = Split(range, at);
- Spill(second_part);
-}
-
-
void LAllocator::Spill(LiveRange* range) {
ASSERT(!range->IsSpilled());
TraceAlloc("Spilling live range %d\n", range->id());
if (!first->HasAllocatedSpillOperand()) {
LOperand* op = TryReuseSpillSlot(range);
- if (op == NULL) op = chunk_->GetNextSpillSlot(mode_ == XMM_REGISTERS);
+ if (op == NULL) op = chunk_->GetNextSpillSlot(mode_ == DOUBLE_REGISTERS);
first->SetSpillOperand(op);
}
range->MakeSpilled();
class LStackSlot;
class LRegister;
+
// This class represents a single point of a LOperand's lifetime.
// For each lithium instruction there are exactly two lifetime positions:
// the beginning and the end of the instruction. Lifetime positions for
// instruction.
bool IsValid() const { return value_ != -1; }
- static LifetimePosition Invalid() { return LifetimePosition(); }
+ static inline LifetimePosition Invalid() { return LifetimePosition(); }
+
+ static inline LifetimePosition MaxPosition() {
+ // We have to use this kind of getter instead of static member due to
+ // crash bug in GDB.
+ return LifetimePosition(kMaxInt);
+ }
private:
static const int kStep = 2;
};
+// Identifies which bank of hardware registers an allocation pass (or an
+// assigned live range) targets; NONE means no pass is currently active.
+enum RegisterKind {
+ NONE,
+ GENERAL_REGISTERS,
+ DOUBLE_REGISTERS
+};
+
+
class LOperand: public ZoneObject {
public:
enum Kind {
explicit LiveRange(int id)
: id_(id),
spilled_(false),
- assigned_double_(false),
assigned_register_(kInvalidAssignment),
+ assigned_register_kind_(NONE),
last_interval_(NULL),
first_interval_(NULL),
first_pos_(NULL),
LOperand* CreateAssignedOperand();
int assigned_register() const { return assigned_register_; }
int spill_start_index() const { return spill_start_index_; }
- void set_assigned_register(int reg, bool double_reg) {
+ void set_assigned_register(int reg, RegisterKind register_kind) {
ASSERT(!HasRegisterAssigned() && !IsSpilled());
assigned_register_ = reg;
- assigned_double_ = double_reg;
+ assigned_register_kind_ = register_kind;
ConvertOperands();
}
void MakeSpilled() {
// Can this live range be spilled at this position.
bool CanBeSpilled(LifetimePosition pos);
+ // Split this live range at the given position which must follow the start of
+ // the range.
+ // All uses following the given position will be moved from this
+ // live range to the result live range.
void SplitAt(LifetimePosition position, LiveRange* result);
- bool IsDouble() const { return assigned_double_; }
+ bool IsDouble() const { return assigned_register_kind_ == DOUBLE_REGISTERS; }
bool HasRegisterAssigned() const {
return assigned_register_ != kInvalidAssignment;
}
int id_;
bool spilled_;
- bool assigned_double_;
int assigned_register_;
+ RegisterKind assigned_register_kind_;
UseInterval* last_interval_;
UseInterval* first_interval_;
UsePosition* first_pos_;
// Checks whether the value of a given virtual register is tagged.
bool HasTaggedValue(int virtual_register) const;
- // Checks whether the value of a given virtual register is a double.
- bool HasDoubleValue(int virtual_register) const;
+ // Returns the register kind required by the given virtual register.
+ RegisterKind RequiredRegisterKind(int virtual_register) const;
// Begin a new instruction.
void BeginInstruction();
#endif
private:
- enum OperationMode {
- NONE,
- CPU_REGISTERS,
- XMM_REGISTERS
- };
-
void MeetRegisterConstraints();
void ResolvePhis();
void BuildLiveRanges();
// Helper methods for allocating registers.
bool TryAllocateFreeReg(LiveRange* range);
void AllocateBlockedReg(LiveRange* range);
- void SplitAndSpillIntersecting(LiveRange* range);
+
+ // Live range splitting helpers.
+
+ // Split the given range at the given position.
+ // If range starts at or after the given position then the
+ // original range is returned.
+ // Otherwise returns the live range that starts at pos and contains
+ // all uses from the original range that follow pos. Uses at pos will
+ // still be owned by the original range after splitting.
+ LiveRange* SplitAt(LiveRange* range, LifetimePosition pos);
+
+ // Split the given range in a position from the interval [start, end].
+ LiveRange* SplitBetween(LiveRange* range,
+ LifetimePosition start,
+ LifetimePosition end);
+
+ // Find a lifetime position in the interval [start, end] which
+ // is optimal for splitting: it is either header of the outermost
+ // loop covered by this interval or the latest possible position.
LifetimePosition FindOptimalSplitPos(LifetimePosition start,
LifetimePosition end);
- LiveRange* Split(LiveRange* range,
- LifetimePosition start,
- LifetimePosition end);
- LiveRange* Split(LiveRange* range, LifetimePosition split_pos);
- void SplitAndSpill(LiveRange* range,
- LifetimePosition start,
- LifetimePosition end);
- void SplitAndSpill(LiveRange* range, LifetimePosition at);
+
+ // Spill the given life range after position pos.
+ void SpillAfter(LiveRange* range, LifetimePosition pos);
+
+ // Spill the given life range after position start and up to position end.
+ void SpillBetween(LiveRange* range,
+ LifetimePosition start,
+ LifetimePosition end);
+
+ void SplitAndSpillIntersecting(LiveRange* range);
+
void Spill(LiveRange* range);
bool IsBlockBoundary(LifetimePosition pos);
void AddGapMove(int pos, LiveRange* prev, LiveRange* next);
HPhi* LookupPhi(LOperand* operand) const;
LGap* GetLastGap(HBasicBlock* block) const;
+ const char* RegisterName(int allocation_index);
+
LChunk* chunk_;
ZoneList<InstructionSummary*> summaries_;
InstructionSummary* next_summary_;
// Next virtual register number to be assigned to temporaries.
int next_virtual_register_;
- OperationMode mode_;
+ RegisterKind mode_;
int num_registers_;
HGraph* graph_;