1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
9 #include "src/x64/lithium-codegen-x64.h"
10 #include "src/x64/lithium-gap-resolver-x64.h"
// Creates a resolver bound to |owner|'s code generator.  The move
// worklist is allocated in the owner's zone with an initial capacity
// of 32 entries.
LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner), moves_(32, owner->zone()) {}
19 void LGapResolver::Resolve(LParallelMove* parallel_move) {
20 DCHECK(moves_.is_empty());
21 // Build up a worklist of moves.
22 BuildInitialMoveList(parallel_move);
24 for (int i = 0; i < moves_.length(); ++i) {
25 LMoveOperands move = moves_[i];
26 // Skip constants to perform them last. They don't block other moves
27 // and skipping such moves with register destinations keeps those
28 // registers free for the whole algorithm.
29 if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
34 // Perform the moves with constant sources.
35 for (int i = 0; i < moves_.length(); ++i) {
36 if (!moves_[i].IsEliminated()) {
37 DCHECK(moves_[i].source()->IsConstantOperand());
46 void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
47 // Perform a linear sweep of the moves to add them to the initial list of
48 // moves to perform, ignoring any move that is redundant (the source is
49 // the same as the destination, the destination is ignored and
50 // unallocated, or the move was already eliminated).
51 const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
52 for (int i = 0; i < moves->length(); ++i) {
53 LMoveOperands move = moves->at(i);
54 if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
60 void LGapResolver::PerformMove(int index) {
61 // Each call to this function performs a move and deletes it from the move
62 // graph. We first recursively perform any move blocking this one. We
63 // mark a move as "pending" on entry to PerformMove in order to detect
64 // cycles in the move graph. We use operand swaps to resolve cycles,
65 // which means that a call to PerformMove could change any source operand
68 DCHECK(!moves_[index].IsPending());
69 DCHECK(!moves_[index].IsRedundant());
71 // Clear this move's destination to indicate a pending move. The actual
72 // destination is saved in a stack-allocated local. Recursion may allow
73 // multiple moves to be pending.
74 DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated.
75 LOperand* destination = moves_[index].destination();
76 moves_[index].set_destination(NULL);
78 // Perform a depth-first traversal of the move graph to resolve
79 // dependencies. Any unperformed, unpending move with a source the same
80 // as this one's destination blocks this one so recursively perform all
82 for (int i = 0; i < moves_.length(); ++i) {
83 LMoveOperands other_move = moves_[i];
84 if (other_move.Blocks(destination) && !other_move.IsPending()) {
85 // Though PerformMove can change any source operand in the move graph,
86 // this call cannot create a blocking move via a swap (this loop does
87 // not miss any). Assume there is a non-blocking move with source A
88 // and this move is blocked on source B and there is a swap of A and
89 // B. Then A and B must be involved in the same cycle (or they would
90 // not be swapped). Since this move's destination is B and there is
91 // only a single incoming edge to an operand, this move must also be
92 // involved in the same cycle. In that case, the blocking move will
93 // be created but will be "pending" when we return from PerformMove.
98 // We are about to resolve this move and don't need it marked as
99 // pending, so restore its destination.
100 moves_[index].set_destination(destination);
102 // This move's source may have changed due to swaps to resolve cycles and
103 // so it may now be the last move in the cycle. If so remove it.
104 if (moves_[index].source()->Equals(destination)) {
105 moves_[index].Eliminate();
109 // The move may be blocked on a (at most one) pending move, in which case
110 // we have a cycle. Search for such a blocking move and perform a swap to
112 for (int i = 0; i < moves_.length(); ++i) {
113 LMoveOperands other_move = moves_[i];
114 if (other_move.Blocks(destination)) {
115 DCHECK(other_move.IsPending());
121 // This move is not blocked.
126 void LGapResolver::Verify() {
127 #ifdef ENABLE_SLOW_DCHECKS
128 // No operand should be the destination for more than one move.
129 for (int i = 0; i < moves_.length(); ++i) {
130 LOperand* destination = moves_[i].destination();
131 for (int j = i + 1; j < moves_.length(); ++j) {
132 SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
139 #define __ ACCESS_MASM(cgen_->masm())
142 void LGapResolver::EmitMove(int index) {
143 LOperand* source = moves_[index].source();
144 LOperand* destination = moves_[index].destination();
146 // Dispatch on the source and destination operand kinds. Not all
147 // combinations are possible.
148 if (source->IsRegister()) {
149 Register src = cgen_->ToRegister(source);
150 if (destination->IsRegister()) {
151 Register dst = cgen_->ToRegister(destination);
154 DCHECK(destination->IsStackSlot());
155 Operand dst = cgen_->ToOperand(destination);
159 } else if (source->IsStackSlot()) {
160 Operand src = cgen_->ToOperand(source);
161 if (destination->IsRegister()) {
162 Register dst = cgen_->ToRegister(destination);
165 DCHECK(destination->IsStackSlot());
166 Operand dst = cgen_->ToOperand(destination);
167 __ movp(kScratchRegister, src);
168 __ movp(dst, kScratchRegister);
171 } else if (source->IsConstantOperand()) {
172 LConstantOperand* constant_source = LConstantOperand::cast(source);
173 if (destination->IsRegister()) {
174 Register dst = cgen_->ToRegister(destination);
175 if (cgen_->IsSmiConstant(constant_source)) {
176 __ Move(dst, cgen_->ToSmi(constant_source));
177 } else if (cgen_->IsInteger32Constant(constant_source)) {
178 int32_t constant = cgen_->ToInteger32(constant_source);
179 // Do sign extension only for constant used as de-hoisted array key.
180 // Others only need zero extension, which saves 2 bytes.
181 if (cgen_->IsDehoistedKeyConstant(constant_source)) {
182 __ Set(dst, constant);
184 __ Set(dst, static_cast<uint32_t>(constant));
187 __ Move(dst, cgen_->ToHandle(constant_source));
189 } else if (destination->IsDoubleRegister()) {
190 double v = cgen_->ToDouble(constant_source);
191 uint64_t int_val = bit_cast<uint64_t, double>(v);
192 XMMRegister dst = cgen_->ToDoubleRegister(destination);
196 __ Set(kScratchRegister, int_val);
197 __ movq(dst, kScratchRegister);
200 DCHECK(destination->IsStackSlot());
201 Operand dst = cgen_->ToOperand(destination);
202 if (cgen_->IsSmiConstant(constant_source)) {
203 __ Move(dst, cgen_->ToSmi(constant_source));
204 } else if (cgen_->IsInteger32Constant(constant_source)) {
205 // Do sign extension to 64 bits when stored into stack slot.
206 __ movp(dst, Immediate(cgen_->ToInteger32(constant_source)));
208 __ Move(kScratchRegister, cgen_->ToHandle(constant_source));
209 __ movp(dst, kScratchRegister);
213 } else if (source->IsDoubleRegister()) {
214 XMMRegister src = cgen_->ToDoubleRegister(source);
215 if (destination->IsDoubleRegister()) {
216 __ movaps(cgen_->ToDoubleRegister(destination), src);
218 DCHECK(destination->IsDoubleStackSlot());
219 __ movsd(cgen_->ToOperand(destination), src);
221 } else if (source->IsDoubleStackSlot()) {
222 Operand src = cgen_->ToOperand(source);
223 if (destination->IsDoubleRegister()) {
224 __ movsd(cgen_->ToDoubleRegister(destination), src);
226 DCHECK(destination->IsDoubleStackSlot());
228 __ movsd(cgen_->ToOperand(destination), xmm0);
230 } else if (source->IsSIMD128Register()) {
231 XMMRegister src = cgen_->ToSIMD128Register(source);
232 if (destination->IsSIMD128Register()) {
233 __ movaps(cgen_->ToSIMD128Register(destination), src);
235 DCHECK(destination->IsSIMD128StackSlot());
236 __ movups(cgen_->ToOperand(destination), src);
238 } else if (source->IsSIMD128StackSlot()) {
239 Operand src = cgen_->ToOperand(source);
240 if (destination->IsSIMD128Register()) {
241 __ movups(cgen_->ToSIMD128Register(destination), src);
243 DCHECK(destination->IsSIMD128StackSlot());
244 __ movups(xmm0, src);
245 __ movups(cgen_->ToOperand(destination), xmm0);
251 moves_[index].Eliminate();
255 void LGapResolver::EmitSwap(int index) {
256 LOperand* source = moves_[index].source();
257 LOperand* destination = moves_[index].destination();
259 // Dispatch on the source and destination operand kinds. Not all
260 // combinations are possible.
261 if (source->IsRegister() && destination->IsRegister()) {
262 // Swap two general-purpose registers.
263 Register src = cgen_->ToRegister(source);
264 Register dst = cgen_->ToRegister(destination);
267 } else if ((source->IsRegister() && destination->IsStackSlot()) ||
268 (source->IsStackSlot() && destination->IsRegister())) {
269 // Swap a general-purpose register and a stack slot.
271 cgen_->ToRegister(source->IsRegister() ? source : destination);
273 cgen_->ToOperand(source->IsRegister() ? destination : source);
274 __ movp(kScratchRegister, mem);
276 __ movp(reg, kScratchRegister);
278 } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
279 (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot())) {
280 // Swap two stack slots or two double stack slots.
281 Operand src = cgen_->ToOperand(source);
282 Operand dst = cgen_->ToOperand(destination);
284 __ movp(kScratchRegister, dst);
286 __ movp(src, kScratchRegister);
288 } else if ((source->IsSIMD128StackSlot() &&
289 destination->IsSIMD128StackSlot())) {
290 // Swap two XMM stack slots.
291 STATIC_ASSERT(kSIMD128Size == 2 * kDoubleSize);
292 Operand src = cgen_->ToOperand(source);
293 Operand dst = cgen_->ToOperand(destination);
294 __ movups(xmm0, src);
295 __ movq(kScratchRegister, dst);
296 __ movq(src, kScratchRegister);
297 __ movq(kScratchRegister, Operand(dst, kDoubleSize));
298 __ movq(Operand(src, kDoubleSize), kScratchRegister);
299 __ movups(dst, xmm0);
301 } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
302 // Swap two double registers.
303 XMMRegister source_reg = cgen_->ToDoubleRegister(source);
304 XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
305 __ movaps(xmm0, source_reg);
306 __ movaps(source_reg, destination_reg);
307 __ movaps(destination_reg, xmm0);
309 } else if (source->IsSIMD128Register() && destination->IsSIMD128Register()) {
310 // Swap two XMM registers.
311 XMMRegister source_reg = cgen_->ToSIMD128Register(source);
312 XMMRegister destination_reg = cgen_->ToSIMD128Register(destination);
313 __ movaps(xmm0, source_reg);
314 __ movaps(source_reg, destination_reg);
315 __ movaps(destination_reg, xmm0);
317 } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
318 // Swap a double register and a double stack slot.
319 DCHECK((source->IsDoubleRegister() && destination->IsDoubleStackSlot()) ||
320 (source->IsDoubleStackSlot() && destination->IsDoubleRegister()));
321 XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
324 LOperand* other = source->IsDoubleRegister() ? destination : source;
325 DCHECK(other->IsDoubleStackSlot());
326 Operand other_operand = cgen_->ToOperand(other);
327 __ movsd(xmm0, other_operand);
328 __ movsd(other_operand, reg);
329 __ movaps(reg, xmm0);
331 } else if (source->IsSIMD128Register() || destination->IsSIMD128Register()) {
332 // Swap a xmm register and a xmm stack slot.
333 DCHECK((source->IsSIMD128Register() &&
334 destination->IsSIMD128StackSlot()) ||
335 (source->IsSIMD128StackSlot() &&
336 destination->IsSIMD128Register()));
337 XMMRegister reg = cgen_->ToSIMD128Register(source->IsSIMD128Register()
340 LOperand* other = source->IsSIMD128Register() ? destination : source;
341 DCHECK(other->IsSIMD128StackSlot());
342 Operand other_operand = cgen_->ToOperand(other);
343 __ movups(xmm0, other_operand);
344 __ movups(other_operand, reg);
345 __ movaps(reg, xmm0);
347 // No other combinations are possible.
351 // The swap of source and destination has executed a move from source to
353 moves_[index].Eliminate();
355 // Any unperformed (including pending) move with a source of either
356 // this move's source or destination needs to have their source
357 // changed to reflect the state of affairs after the swap.
358 for (int i = 0; i < moves_.length(); ++i) {
359 LMoveOperands other_move = moves_[i];
360 if (other_move.Blocks(source)) {
361 moves_[i].set_source(destination);
362 } else if (other_move.Blocks(destination)) {
363 moves_[i].set_source(source);
370 } } // namespace v8::internal
372 #endif // V8_TARGET_ARCH_X64