// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_IA32

#include "ia32/lithium-gap-resolver-ia32.h"
#include "ia32/lithium-codegen-ia32.h"

namespace v8 {
namespace internal {

LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner),
      moves_(32, owner->zone()),
      source_uses_(),
      destination_uses_(),
      spilled_register_(-1) {}


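// Resolve() performs all moves in 'parallel_move': first the non-constant
// moves (which can block one another and form cycles), then the constant
// loads.  On exit the move list, the per-register use counts, and any
// spilled register must all be back in their reset state; see
// HasBeenReset().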
void LGapResolver::Resolve(LParallelMove* parallel_move) {
  ASSERT(HasBeenReset());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants to perform them last.  They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      PerformMove(i);
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      ASSERT(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  Finish();
  ASSERT(HasBeenReset());
}


void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) AddMove(move);
  }
  Verify();
}


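// For example, the parallel move {eax -> ebx, ebx -> eax} is a two-move
// cycle: PerformMove on the first move marks it pending, recurses into the
// second move (which blocks it), finds the first move pending there, and
// resolves the cycle with a single EmitSwap that satisfies both moves.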
void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph.  We first recursively perform any move blocking this one.  We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph.  We use operand swaps to resolve cycles,
  // which means that a call to PerformMove could change any source operand
  // in the move graph.

  ASSERT(!moves_[index].IsPending());
  ASSERT(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move.  The actual
  // destination is saved on the side.
  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies.  Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move via a swap (this loop does
      // not miss any).  Assume there is a non-blocking move with source A
      // and this move is blocked on source B and there is a swap of A and
      // B.  Then A and B must be involved in the same cycle (or they would
      // not be swapped).  Since this move's destination is B and there is
      // only a single incoming edge to an operand, this move must also be
      // involved in the same cycle.  In that case, the blocking move will
      // be created but will be "pending" when we return from PerformMove.
      PerformMove(i);
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // This move's source may have changed due to swaps to resolve cycles and
  // so it may now be the last move in the cycle.  If so remove it.
  if (moves_[index].source()->Equals(destination)) {
    RemoveMove(index);
    return;
  }

  // The move may be blocked on a (at most one) pending move, in which case
  // we have a cycle.  Search for such a blocking move and perform a swap to
  // resolve it.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination)) {
      ASSERT(other_move.IsPending());
      EmitSwap(index);
      return;
    }
  }

  // This move is not blocked.
  EmitMove(index);
}


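// AddMove and RemoveMove keep the per-register use counts in sync with the
// contents of the move list; GetFreeRegisterNot and EnsureTempRegister rely
// on those counts to pick temporaries.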
void LGapResolver::AddMove(LMoveOperands move) {
  LOperand* source = move.source();
  if (source->IsRegister()) ++source_uses_[source->index()];

  LOperand* destination = move.destination();
  if (destination->IsRegister()) ++destination_uses_[destination->index()];

  moves_.Add(move, cgen_->zone());
}


void LGapResolver::RemoveMove(int index) {
  LOperand* source = moves_[index].source();
  if (source->IsRegister()) {
    --source_uses_[source->index()];
    ASSERT(source_uses_[source->index()] >= 0);
  }

  LOperand* destination = moves_[index].destination();
  if (destination->IsRegister()) {
    --destination_uses_[destination->index()];
    ASSERT(destination_uses_[destination->index()] >= 0);
  }

  moves_[index].Eliminate();
}


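// Recount the unperformed moves that read from 'operand'.  EmitSwap uses
// this to rebuild a register's source-use count after a swap has changed
// other moves' sources.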
int LGapResolver::CountSourceUses(LOperand* operand) {
  int count = 0;
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
      ++count;
    }
  }
  return count;
}


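// Find an allocatable register, other than 'reg', that no remaining move
// reads but at least one move writes.  Such a register will be clobbered
// by the algorithm anyway, so it can serve as a temporary without being
// spilled or restored.  Returns no_reg if there is none.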
Register LGapResolver::GetFreeRegisterNot(Register reg) {
  int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
    if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
      return Register::FromAllocationIndex(i);
    }
  }
  return no_reg;
}


bool LGapResolver::HasBeenReset() {
  if (!moves_.is_empty()) return false;
  if (spilled_register_ >= 0) return false;

  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
    if (source_uses_[i] != 0) return false;
    if (destination_uses_[i] != 0) return false;
  }
  return true;
}


void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}


#define __ ACCESS_MASM(cgen_->masm())

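// If EnsureTempRegister spilled a register, pop it now that the parallel
// move is complete, and clear the move list for the next resolution.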
void LGapResolver::Finish() {
  if (spilled_register_ >= 0) {
    __ pop(Register::FromAllocationIndex(spilled_register_));
    spilled_register_ = -1;
  }
  moves_.Rewind(0);
}


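// If 'operand' lives in the register that is currently spilled, restore
// that register before the operand is read or written.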
void LGapResolver::EnsureRestored(LOperand* operand) {
  if (operand->IsRegister() && operand->index() == spilled_register_) {
    __ pop(Register::FromAllocationIndex(spilled_register_));
    spilled_register_ = -1;
  }
}


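// Return a general-purpose register to use as a temporary, spilling one to
// the stack (to be restored by EnsureRestored or Finish) if none is free.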
Register LGapResolver::EnsureTempRegister() {
  // 1. We may have already spilled to create a temp register.
  if (spilled_register_ >= 0) {
    return Register::FromAllocationIndex(spilled_register_);
  }

  // 2. We may have a free register that we can use without spilling.
  Register free = GetFreeRegisterNot(no_reg);
  if (!free.is(no_reg)) return free;

  // 3. Prefer to spill a register that is not used in any remaining move
  // because it will not need to be restored until the end.
  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
    if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
      Register scratch = Register::FromAllocationIndex(i);
      __ push(scratch);
      spilled_register_ = i;
      return scratch;
    }
  }

  // 4. Use an arbitrary register.  Register 0 is as arbitrary as any other.
  Register scratch = Register::FromAllocationIndex(0);
  __ push(scratch);
  spilled_register_ = 0;
  return scratch;
}


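// Emit code for moves_[index] and remove it from the move list.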
void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    Register src = cgen_->ToRegister(source);
    Operand dst = cgen_->ToOperand(destination);
    __ mov(dst, src);

  } else if (source->IsStackSlot()) {
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    Operand src = cgen_->ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ mov(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = EnsureTempRegister();
      Operand dst = cgen_->ToOperand(destination);
      __ mov(tmp, src);
      __ mov(dst, tmp);
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ Move(dst, cgen_->ToImmediate(constant_source, r));
      } else {
        __ LoadObject(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      double v = cgen_->ToDouble(constant_source);
      uint64_t int_val = BitCast<uint64_t, double>(v);
      int32_t lower = static_cast<int32_t>(int_val);
      int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
      if (CpuFeatures::IsSupported(SSE2)) {
        CpuFeatureScope scope(cgen_->masm(), SSE2);
        XMMRegister dst = cgen_->ToDoubleRegister(destination);
        if (int_val == 0) {
          __ xorps(dst, dst);
        } else {
          __ push(Immediate(upper));
          __ push(Immediate(lower));
          __ movsd(dst, Operand(esp, 0));
          __ add(esp, Immediate(kDoubleSize));
        }
      } else {
        __ push(Immediate(upper));
        __ push(Immediate(lower));
        X87Register dst = cgen_->ToX87Register(destination);
        cgen_->X87Mov(dst, MemOperand(esp, 0));
        __ add(esp, Immediate(kDoubleSize));
      }
    } else {
      ASSERT(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ Move(dst, cgen_->ToImmediate(constant_source, r));
      } else {
        Register tmp = EnsureTempRegister();
        __ LoadObject(tmp, cgen_->ToHandle(constant_source));
        __ mov(dst, tmp);
      }
    }

  } else if (source->IsDoubleRegister()) {
    if (CpuFeatures::IsSupported(SSE2)) {
      CpuFeatureScope scope(cgen_->masm(), SSE2);
      XMMRegister src = cgen_->ToDoubleRegister(source);
      if (destination->IsDoubleRegister()) {
        XMMRegister dst = cgen_->ToDoubleRegister(destination);
        __ movaps(dst, src);
      } else {
        ASSERT(destination->IsDoubleStackSlot());
        Operand dst = cgen_->ToOperand(destination);
        __ movsd(dst, src);
      }
    } else {
      // Load from the register onto the X87 stack and store in the
      // destination, which must be a double stack slot in the non-SSE2
      // case.
      ASSERT(destination->IsDoubleStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      X87Register src = cgen_->ToX87Register(source);
      cgen_->X87Mov(dst, src);
    }

  } else if (source->IsDoubleStackSlot()) {
    if (CpuFeatures::IsSupported(SSE2)) {
      CpuFeatureScope scope(cgen_->masm(), SSE2);
      ASSERT(destination->IsDoubleRegister() ||
             destination->IsDoubleStackSlot());
      Operand src = cgen_->ToOperand(source);
      if (destination->IsDoubleRegister()) {
        XMMRegister dst = cgen_->ToDoubleRegister(destination);
        __ movsd(dst, src);
      } else {
        // We rely on having xmm0 available as a fixed scratch register.
        Operand dst = cgen_->ToOperand(destination);
        __ movsd(xmm0, src);
        __ movsd(dst, xmm0);
      }
    } else {
      // Load from the stack slot onto the floating point stack, and then
      // store in the destination.  If the destination is a double register,
      // it represents the top of the stack and nothing needs to be done.
      if (destination->IsDoubleStackSlot()) {
        Register tmp = EnsureTempRegister();
        Operand src0 = cgen_->ToOperand(source);
        Operand src1 = cgen_->HighOperand(source);
        Operand dst0 = cgen_->ToOperand(destination);
        Operand dst1 = cgen_->HighOperand(destination);
        // Copy the 64-bit value one 32-bit word at a time through tmp.
        __ mov(tmp, src0);
        __ mov(dst0, tmp);
        __ mov(tmp, src1);
        __ mov(dst1, tmp);
      } else {
        Operand src = cgen_->ToOperand(source);
        X87Register dst = cgen_->ToX87Register(destination);
        cgen_->X87Mov(dst, src);
      }
    }

  } else if (source->IsSIMD128Register()) {
    ASSERT(CpuFeatures::IsSupported(SSE2));
    CpuFeatureScope scope(cgen_->masm(), SSE2);
    XMMRegister src = cgen_->ToSIMD128Register(source);
    if (destination->IsSIMD128Register()) {
      __ movaps(cgen_->ToSIMD128Register(destination), src);
    } else {
      ASSERT(destination->IsSIMD128StackSlot());
      __ movups(cgen_->ToOperand(destination), src);
    }

  } else if (source->IsSIMD128StackSlot()) {
    ASSERT(CpuFeatures::IsSupported(SSE2));
    CpuFeatureScope scope(cgen_->masm(), SSE2);
    Operand src = cgen_->ToOperand(source);
    if (destination->IsSIMD128Register()) {
      __ movups(cgen_->ToSIMD128Register(destination), src);
    } else {
      ASSERT(destination->IsSIMD128StackSlot());
      // We rely on having xmm0 available as a fixed scratch register.
      __ movups(xmm0, src);
      __ movups(cgen_->ToOperand(destination), xmm0);
    }

  } else {
    UNREACHABLE();
  }

  RemoveMove(index);
}


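// Emit code to swap moves_[index]'s source and destination, remove the
// move, and retarget any other move that reads one of the swapped
// operands.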
void LGapResolver::EmitSwap(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    Register src = cgen_->ToRegister(source);
    Register dst = cgen_->ToRegister(destination);
    __ xchg(dst, src);

  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
             (source->IsStackSlot() && destination->IsRegister())) {
    // Register-memory.  Use a free register as a temp if possible.  Do not
    // spill on demand because the simple spill implementation cannot avoid
    // spilling src at this point.
    Register tmp = GetFreeRegisterNot(no_reg);
    Register reg =
        cgen_->ToRegister(source->IsRegister() ? source : destination);
    Operand mem =
        cgen_->ToOperand(source->IsRegister() ? destination : source);
    if (tmp.is(no_reg)) {
      // Without a temporary, swap register and memory via the
      // three-xor trick.
      __ xor_(reg, mem);
      __ xor_(mem, reg);
      __ xor_(reg, mem);
    } else {
      __ mov(tmp, mem);
      __ mov(mem, reg);
      __ mov(reg, tmp);
    }

  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
    // Memory-memory.  Spill on demand to use a temporary.  If there is a
    // free register after that, use it as a second temporary.
    Register tmp0 = EnsureTempRegister();
    Register tmp1 = GetFreeRegisterNot(tmp0);
    Operand src = cgen_->ToOperand(source);
    Operand dst = cgen_->ToOperand(destination);
    if (tmp1.is(no_reg)) {
      // Only one temp register available to us.
      __ mov(tmp0, dst);
      __ xor_(tmp0, src);
      __ xor_(src, tmp0);
      __ xor_(tmp0, src);
      __ mov(dst, tmp0);
    } else {
      __ mov(tmp0, dst);
      __ mov(tmp1, src);
      __ mov(dst, tmp1);
      __ mov(src, tmp0);
    }

  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    CpuFeatureScope scope(cgen_->masm(), SSE2);
    // XMM register-register swap.  We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = cgen_->ToDoubleRegister(source);
    XMMRegister dst = cgen_->ToDoubleRegister(destination);
    __ movaps(xmm0, src);
    __ movaps(src, dst);
    __ movaps(dst, xmm0);

  } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
    CpuFeatureScope scope(cgen_->masm(), SSE2);
    // XMM register-memory swap.  We rely on having xmm0
    // available as a fixed scratch register.
    ASSERT(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot());
    XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
                                                  ? source
                                                  : destination);
    Operand other =
        cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
    __ movsd(xmm0, other);
    __ movsd(other, reg);
    __ movaps(reg, xmm0);

  } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
    CpuFeatureScope scope(cgen_->masm(), SSE2);
    // Double-width memory-to-memory.  Spill on demand to use a general
    // purpose temporary register and also rely on having xmm0 available as
    // a fixed scratch register.
    Register tmp = EnsureTempRegister();
    Operand src0 = cgen_->ToOperand(source);
    Operand src1 = cgen_->HighOperand(source);
    Operand dst0 = cgen_->ToOperand(destination);
    Operand dst1 = cgen_->HighOperand(destination);
    __ movsd(xmm0, dst0);  // Save destination in xmm0.
    __ mov(tmp, src0);  // Then use tmp to copy source to destination.
    __ mov(dst0, tmp);
    __ mov(tmp, src1);
    __ mov(dst1, tmp);
    __ movsd(src0, xmm0);

  } else if (source->IsSIMD128StackSlot() &&
             destination->IsSIMD128StackSlot()) {
    CpuFeatureScope scope(cgen_->masm(), SSE2);
    // Swap two XMM stack slots.
    Operand src = cgen_->ToOperand(source);
    Operand dst = cgen_->ToOperand(destination);
    Register tmp = EnsureTempRegister();
    __ movups(xmm0, src);
    for (int offset = 0; offset < kSIMD128Size; offset += kPointerSize) {
      __ mov(tmp, Operand(dst, offset));
      __ mov(Operand(src, offset), tmp);
    }
    __ movups(dst, xmm0);

  } else if (source->IsSIMD128Register() && destination->IsSIMD128Register()) {
    CpuFeatureScope scope(cgen_->masm(), SSE2);
    // Swap two XMM registers.
    XMMRegister source_reg = cgen_->ToSIMD128Register(source);
    XMMRegister destination_reg = cgen_->ToSIMD128Register(destination);
    __ movaps(xmm0, source_reg);
    __ movaps(source_reg, destination_reg);
    __ movaps(destination_reg, xmm0);

  } else if (source->IsSIMD128Register() || destination->IsSIMD128Register()) {
    CpuFeatureScope scope(cgen_->masm(), SSE2);
    // Swap an XMM register and an XMM stack slot.
    ASSERT((source->IsSIMD128Register() &&
            destination->IsSIMD128StackSlot()) ||
           (source->IsSIMD128StackSlot() &&
            destination->IsSIMD128Register()));
    XMMRegister reg = cgen_->ToSIMD128Register(source->IsSIMD128Register()
                                                   ? source
                                                   : destination);
    LOperand* other = source->IsSIMD128Register() ? destination : source;
    ASSERT(other->IsSIMD128StackSlot());
    Operand other_operand = cgen_->ToOperand(other);
    __ movups(xmm0, other_operand);
    __ movups(other_operand, reg);
    __ movaps(reg, xmm0);

  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }

  // The swap of source and destination has executed a move from source to
  // destination.
  RemoveMove(index);

  // Any unperformed (including pending) move with a source of either
  // this move's source or destination needs to have their source
  // changed to reflect the state of affairs after the swap.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(source)) {
      moves_[i].set_source(destination);
    } else if (other_move.Blocks(destination)) {
      moves_[i].set_source(source);
    }
  }

  // In addition to swapping the actual uses as sources, we need to update
  // the use counts.
  if (source->IsRegister() && destination->IsRegister()) {
    int temp = source_uses_[source->index()];
    source_uses_[source->index()] = source_uses_[destination->index()];
    source_uses_[destination->index()] = temp;
  } else if (source->IsRegister()) {
    // We don't have use counts for non-register operands like destination.
    // Compute those counts now.
    source_uses_[source->index()] = CountSourceUses(source);
  } else if (destination->IsRegister()) {
    source_uses_[destination->index()] = CountSourceUses(destination);
  }
}

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32