// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_X64

#include "x64/lithium-gap-resolver-x64.h"
#include "x64/lithium-codegen-x64.h"

namespace v8 {
namespace internal {

LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner), moves_(32, owner->zone()) {}
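

// Resolve emits code for a parallel move: a set of moves that must behave
// as if they all happened simultaneously at a gap position.  For example,
// the pair {rax -> rbx, rbx -> rax} must not clobber either value, so it
// is lowered to a swap rather than two ordinary moves.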
void LGapResolver::Resolve(LParallelMove* parallel_move) {
  ASSERT(moves_.is_empty());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants to perform them last.  They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      PerformMove(i);
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      ASSERT(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  moves_.Rewind(0);
}


void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
  }
  Verify();
}
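

// Worked example: for the cycle {A: rax -> rbx, B: rbx -> rax},
// PerformMove(A) marks A pending and recurses into B, which A blocks.
// B in turn is blocked only by the pending move A, so B is resolved with
// a swap (EmitSwap) that also makes A redundant; the recursion then
// unwinds with nothing left to emit for A.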
void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph.  We first recursively perform any move blocking this one.  We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph.  We use operand swaps to resolve cycles,
  // which means that a call to PerformMove could change any source operand
  // in the move graph.

  ASSERT(!moves_[index].IsPending());
  ASSERT(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move.  The actual
  // destination is saved in a stack-allocated local.  Recursion may allow
  // multiple moves to be pending.
  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies.  Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move via a swap (this loop does
      // not miss any).  Assume there is a non-blocking move with source A
      // and this move is blocked on source B and there is a swap of A and
      // B.  Then A and B must be involved in the same cycle (or they would
      // not be swapped).  Since this move's destination is B and there is
      // only a single incoming edge to an operand, this move must also be
      // involved in the same cycle.  In that case, the blocking move will
      // be created but will be "pending" when we return from PerformMove.
      PerformMove(i);
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // This move's source may have changed due to swaps to resolve cycles and
  // so it may now be the last move in the cycle.  If so remove it.
  if (moves_[index].source()->Equals(destination)) {
    moves_[index].Eliminate();
    return;
  }

  // The move may be blocked on at most one pending move, in which case we
  // have a cycle.  Search for such a blocking move and perform a swap to
  // resolve it.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination)) {
      ASSERT(other_move.IsPending());
      EmitSwap(index);
      return;
    }
  }

  // This move is not blocked.
  EmitMove(index);
}


void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}


#define __ ACCESS_MASM(cgen_->masm())
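

// Both EmitMove and EmitSwap are free to clobber kScratchRegister and xmm0,
// which are assumed here to be reserved scratch registers that the register
// allocator never hands out as lithium operands.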
void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    Register src = cgen_->ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ movp(dst, src);
    } else {
      ASSERT(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      __ movp(dst, src);
    }

  } else if (source->IsStackSlot()) {
    Operand src = cgen_->ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ movp(dst, src);
    } else {
      ASSERT(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      __ movp(kScratchRegister, src);
      __ movp(dst, kScratchRegister);
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      if (cgen_->IsSmiConstant(constant_source)) {
        __ Move(dst, cgen_->ToSmi(constant_source));
      } else if (cgen_->IsInteger32Constant(constant_source)) {
        __ Set(dst, static_cast<uint32_t>(cgen_->ToInteger32(constant_source)));
      } else {
        __ Move(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      double v = cgen_->ToDouble(constant_source);
      uint64_t int_val = BitCast<uint64_t, double>(v);
      XMMRegister dst = cgen_->ToDoubleRegister(destination);
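      // A double constant whose bit pattern is all zeros can be materialized
      // with a register self-xor instead of loading a 64-bit immediate
      // through the scratch register.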
      if (int_val == 0) {
        __ xorps(dst, dst);
      } else {
        __ Set(kScratchRegister, int_val);
        __ movq(dst, kScratchRegister);
      }
    } else {
      ASSERT(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      if (cgen_->IsSmiConstant(constant_source)) {
        __ Move(dst, cgen_->ToSmi(constant_source));
      } else if (cgen_->IsInteger32Constant(constant_source)) {
        // Zero top 32 bits of a 64 bit spill slot that holds a 32 bit untagged
        // value.
        __ movp(dst, Immediate(cgen_->ToInteger32(constant_source)));
      } else {
        __ Move(kScratchRegister, cgen_->ToHandle(constant_source));
        __ movp(dst, kScratchRegister);
      }
    }

  } else if (source->IsDoubleRegister()) {
    XMMRegister src = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ movaps(cgen_->ToDoubleRegister(destination), src);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      __ movsd(cgen_->ToOperand(destination), src);
    }

  } else if (source->IsDoubleStackSlot()) {
    Operand src = cgen_->ToOperand(source);
    if (destination->IsDoubleRegister()) {
      __ movsd(cgen_->ToDoubleRegister(destination), src);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      __ movsd(xmm0, src);
      __ movsd(cgen_->ToOperand(destination), xmm0);
    }

  } else if (source->IsSIMD128Register()) {
    XMMRegister src = cgen_->ToSIMD128Register(source);
    if (destination->IsSIMD128Register()) {
      __ movaps(cgen_->ToSIMD128Register(destination), src);
    } else {
      ASSERT(destination->IsSIMD128StackSlot());
      __ movups(cgen_->ToOperand(destination), src);
    }

  } else if (source->IsSIMD128StackSlot()) {
    Operand src = cgen_->ToOperand(source);
    if (destination->IsSIMD128Register()) {
      __ movups(cgen_->ToSIMD128Register(destination), src);
    } else {
      ASSERT(destination->IsSIMD128StackSlot());
      __ movups(xmm0, src);
      __ movups(cgen_->ToOperand(destination), xmm0);
    }

  } else {
    UNREACHABLE();
  }

  moves_[index].Eliminate();
}


void LGapResolver::EmitSwap(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Swap two general-purpose registers.
    Register src = cgen_->ToRegister(source);
    Register dst = cgen_->ToRegister(destination);
    __ xchgq(dst, src);

  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
             (source->IsStackSlot() && destination->IsRegister())) {
    // Swap a general-purpose register and a stack slot.
    Register reg =
        cgen_->ToRegister(source->IsRegister() ? source : destination);
    Operand mem =
        cgen_->ToOperand(source->IsRegister() ? destination : source);
    __ movp(kScratchRegister, mem);
    __ movp(mem, reg);
    __ movp(reg, kScratchRegister);

  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
             (source->IsDoubleStackSlot() &&
              destination->IsDoubleStackSlot())) {
    // Swap two stack slots or two double stack slots.
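    // A memory-to-memory swap needs two temporaries: xmm0 holds one slot's
    // value and kScratchRegister the other's, so no extra stack slot is
    // needed.  Both movsd and movp copy the full 8-byte slot here.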
    Operand src = cgen_->ToOperand(source);
    Operand dst = cgen_->ToOperand(destination);
    __ movsd(xmm0, src);
    __ movp(kScratchRegister, dst);
    __ movsd(dst, xmm0);
    __ movp(src, kScratchRegister);

  } else if ((source->IsSIMD128StackSlot() &&
              destination->IsSIMD128StackSlot())) {
    // Swap two XMM stack slots.
    STATIC_ASSERT(kSIMD128Size == 2 * kDoubleSize);
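    // There is only one free XMM temporary (xmm0), so the 16-byte swap goes
    // through it in one direction, and through kScratchRegister, 8 bytes at
    // a time, in the other.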
    Operand src = cgen_->ToOperand(source);
    Operand dst = cgen_->ToOperand(destination);
    __ movups(xmm0, src);
    __ movq(kScratchRegister, dst);
    __ movq(src, kScratchRegister);
    __ movq(kScratchRegister, Operand(dst, kDoubleSize));
    __ movq(Operand(src, kDoubleSize), kScratchRegister);
    __ movups(dst, xmm0);

  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // Swap two double registers.
    XMMRegister source_reg = cgen_->ToDoubleRegister(source);
    XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
    __ movaps(xmm0, source_reg);
    __ movaps(source_reg, destination_reg);
    __ movaps(destination_reg, xmm0);

  } else if (source->IsSIMD128Register() && destination->IsSIMD128Register()) {
    // Swap two XMM registers.
    XMMRegister source_reg = cgen_->ToSIMD128Register(source);
    XMMRegister destination_reg = cgen_->ToSIMD128Register(destination);
    __ movaps(xmm0, source_reg);
    __ movaps(source_reg, destination_reg);
    __ movaps(destination_reg, xmm0);

  } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
    // Swap a double register and a double stack slot.
    ASSERT((source->IsDoubleRegister() && destination->IsDoubleStackSlot()) ||
           (source->IsDoubleStackSlot() && destination->IsDoubleRegister()));
    XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
                                                  ? source
                                                  : destination);
    LOperand* other = source->IsDoubleRegister() ? destination : source;
    ASSERT(other->IsDoubleStackSlot());
    Operand other_operand = cgen_->ToOperand(other);
    __ movsd(xmm0, other_operand);
    __ movsd(other_operand, reg);
    __ movaps(reg, xmm0);

  } else if (source->IsSIMD128Register() || destination->IsSIMD128Register()) {
    // Swap an XMM register and an XMM stack slot.
    ASSERT((source->IsSIMD128Register() &&
            destination->IsSIMD128StackSlot()) ||
           (source->IsSIMD128StackSlot() &&
            destination->IsSIMD128Register()));
    XMMRegister reg = cgen_->ToSIMD128Register(source->IsSIMD128Register()
                                                   ? source
                                                   : destination);
    LOperand* other = source->IsSIMD128Register() ? destination : source;
    ASSERT(other->IsSIMD128StackSlot());
    Operand other_operand = cgen_->ToOperand(other);
    __ movups(xmm0, other_operand);
    __ movups(other_operand, reg);
    __ movaps(reg, xmm0);

  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }

  // The swap of source and destination has executed a move from source to
  // destination.
  moves_[index].Eliminate();

  // Any unperformed (including pending) move with a source of either
  // this move's source or destination needs to have its source
  // changed to reflect the state of affairs after the swap.
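  // For example, if this swap exchanged rax and rbx, a remaining move
  // rax -> rcx must be rewritten as rbx -> rcx, since the value that was
  // in rax now lives in rbx.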
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(source)) {
      moves_[i].set_source(destination);
    } else if (other_move.Blocks(destination)) {
      moves_[i].set_source(source);
    }
  }
}

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64