// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_IA32
-#include "ia32/lithium-gap-resolver-ia32.h"
-#include "ia32/lithium-codegen-ia32.h"
+#include "src/ia32/lithium-codegen-ia32.h"
+#include "src/ia32/lithium-gap-resolver-ia32.h"
namespace v8 {
namespace internal {
void LGapResolver::Resolve(LParallelMove* parallel_move) {
- ASSERT(HasBeenReset());
+ DCHECK(HasBeenReset());
// Build up a worklist of moves.
BuildInitialMoveList(parallel_move);
// Perform the moves with constant sources.
for (int i = 0; i < moves_.length(); ++i) {
if (!moves_[i].IsEliminated()) {
- ASSERT(moves_[i].source()->IsConstantOperand());
+ DCHECK(moves_[i].source()->IsConstantOperand());
EmitMove(i);
}
}
Finish();
- ASSERT(HasBeenReset());
+ DCHECK(HasBeenReset());
}
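// A standalone, simplified model of the strategy Resolve() and PerformMove()
// implement (illustration only, not V8 code; all names below are invented).
// A move is marked pending while the moves blocking it are performed;
// meeting a pending move again means a cycle, which is broken by a swap
// that then retargets the sources of the remaining moves. Constant-source
// moves can never block anything, which is why Resolve() emits them last.
#include <cstdio>
#include <utility>
#include <vector>

struct Move {
  int src, dst;                         // abstract location indices
  bool pending = false, done = false;
};

static void PerformMove(std::vector<Move>& moves, size_t i, int* loc) {
  moves[i].pending = true;
  // First perform every non-pending move that reads from our destination.
  for (size_t j = 0; j < moves.size(); ++j) {
    if (j != i && !moves[j].done && !moves[j].pending &&
        moves[j].src == moves[i].dst) {
      PerformMove(moves, j, loc);
    }
  }
  // Any blocker still left must be pending, i.e. part of a cycle.
  for (size_t j = 0; j < moves.size(); ++j) {
    if (j != i && !moves[j].done && moves[j].src == moves[i].dst) {
      int s = moves[i].src, d = moves[i].dst;
      std::swap(loc[s], loc[d]);        // break the cycle with a swap
      moves[i].pending = false;
      moves[i].done = true;
      for (auto& m : moves) {           // retarget sources hit by the swap
        if (m.done) continue;
        if (m.src == s) m.src = d;
        else if (m.src == d) m.src = s;
      }
      return;
    }
  }
  loc[moves[i].dst] = loc[moves[i].src];  // unblocked: an ordinary move
  moves[i].pending = false;
  moves[i].done = true;
}

int main() {
  int loc[3] = {10, 20, 30};
  std::vector<Move> moves = {{0, 1}, {1, 2}, {2, 0}};  // a three-cycle
  for (size_t i = 0; i < moves.size(); ++i) {
    if (!moves[i].done) PerformMove(moves, i, loc);
  }
  std::printf("%d %d %d\n", loc[0], loc[1], loc[2]);   // 30 10 20
  return 0;
}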
// which means that a call to PerformMove could change any source operand
// in the move graph.
- ASSERT(!moves_[index].IsPending());
- ASSERT(!moves_[index].IsRedundant());
+ DCHECK(!moves_[index].IsPending());
+ DCHECK(!moves_[index].IsRedundant());
// Clear this move's destination to indicate a pending move. The actual
// destination is saved on the side.
- ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
+ DCHECK(moves_[index].source() != NULL); // Or else it will look eliminated.
LOperand* destination = moves_[index].destination();
moves_[index].set_destination(NULL);
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands other_move = moves_[i];
if (other_move.Blocks(destination)) {
- ASSERT(other_move.IsPending());
+ DCHECK(other_move.IsPending());
EmitSwap(index);
return;
}
LOperand* source = moves_[index].source();
if (source->IsRegister()) {
--source_uses_[source->index()];
- ASSERT(source_uses_[source->index()] >= 0);
+ DCHECK(source_uses_[source->index()] >= 0);
}
LOperand* destination = moves_[index].destination();
if (destination->IsRegister()) {
--destination_uses_[destination->index()];
- ASSERT(destination_uses_[destination->index()] >= 0);
+ DCHECK(destination_uses_[destination->index()] >= 0);
}
moves_[index].Eliminate();
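// The use counts maintained above let the resolver pick a general-purpose
// temporary that no unperformed move still reads or writes (this is what
// EnsureTempRegister() relies on). A simplified standalone sketch with
// invented names (illustration only, not V8 code):
#include <cstdio>

// Return the first of num_regs registers with no outstanding move uses,
// or -1 if every register is live (the real resolver would spill one).
static int FindFreeRegister(const int* source_uses,
                            const int* destination_uses, int num_regs) {
  for (int r = 0; r < num_regs; ++r) {
    if (source_uses[r] == 0 && destination_uses[r] == 0) return r;
  }
  return -1;
}

int main() {
  int src_uses[4] = {1, 0, 2, 0};
  int dst_uses[4] = {0, 1, 0, 0};
  std::printf("free: %d\n", FindFreeRegister(src_uses, dst_uses, 4));  // 3
  return 0;
}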
void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_ASSERTS
+#ifdef ENABLE_SLOW_DCHECKS
// No operand should be the destination for more than one move.
for (int i = 0; i < moves_.length(); ++i) {
LOperand* destination = moves_[i].destination();
for (int j = i + 1; j < moves_.length(); ++j) {
- SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+ SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
}
}
#endif
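// For context on the ASSERT -> DCHECK rename in this patch: DCHECK is a
// debug-only check, and SLOW_DCHECK is additionally compiled out unless
// ENABLE_SLOW_DCHECKS is defined, since these checks are too expensive for
// ordinary debug builds. A minimal sketch of the macro pattern; the real
// definitions live in V8's checks headers and differ in detail:
#include <cassert>

#ifdef DEBUG
#define DCHECK(condition) assert(condition)   // debug builds only
#else
#define DCHECK(condition) ((void)0)
#endif

#ifdef ENABLE_SLOW_DCHECKS
#define SLOW_DCHECK(condition) DCHECK(condition)
#else
#define SLOW_DCHECK(condition) ((void)0)      // off even in debug builds
#endif

int main() {
  int x = 1;
  DCHECK(x == 1);       // checked only when DEBUG is defined
  SLOW_DCHECK(x >= 0);  // checked only with ENABLE_SLOW_DCHECKS as well
  return 0;
}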
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
- ASSERT(destination->IsRegister() || destination->IsStackSlot());
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
Register src = cgen_->ToRegister(source);
Operand dst = cgen_->ToOperand(destination);
__ mov(dst, src);
} else if (source->IsStackSlot()) {
- ASSERT(destination->IsRegister() || destination->IsStackSlot());
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
Operand src = cgen_->ToOperand(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
__ mov(dst, src);
}
} else if (destination->IsDoubleRegister()) {
double v = cgen_->ToDouble(constant_source);
- uint64_t int_val = BitCast<uint64_t, double>(v);
+ uint64_t int_val = bit_cast<uint64_t, double>(v);
int32_t lower = static_cast<int32_t>(int_val);
int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(cgen_->masm(), SSE2);
- XMMRegister dst = cgen_->ToDoubleRegister(destination);
- if (int_val == 0) {
- __ xorps(dst, dst);
- } else {
- __ push(Immediate(upper));
- __ push(Immediate(lower));
- __ movsd(dst, Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- }
+ XMMRegister dst = cgen_->ToDoubleRegister(destination);
+ if (int_val == 0) {
+ __ xorps(dst, dst);
} else {
__ push(Immediate(upper));
__ push(Immediate(lower));
- X87Register dst = cgen_->ToX87Register(destination);
- cgen_->X87Mov(dst, MemOperand(esp, 0));
+ __ movsd(dst, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
}
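// A standalone illustration of the bit_cast idiom above (not V8's
// bit_cast; this sketch uses memcpy, which is the well-defined way to
// reinterpret object bits): the double's bit pattern becomes a uint64_t,
// which is then split into the two 32-bit halves pushed as immediates,
// kBitsPerInt being 32 on this target.
#include <cstdint>
#include <cstdio>
#include <cstring>

static uint64_t BitsOfDouble(double v) {
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof bits);  // defined behavior, unlike a pun
  return bits;
}

int main() {
  uint64_t int_val = BitsOfDouble(1.0);
  uint32_t lower = static_cast<uint32_t>(int_val);        // low 32 bits
  uint32_t upper = static_cast<uint32_t>(int_val >> 32);  // high 32 bits
  std::printf("upper=%08x lower=%08x\n", upper, lower);   // 3ff00000 00000000
  return 0;
}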
} else {
- ASSERT(destination->IsStackSlot());
+ DCHECK(destination->IsStackSlot());
Operand dst = cgen_->ToOperand(destination);
Representation r = cgen_->IsSmi(constant_source)
? Representation::Smi() : Representation::Integer32();
}
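// On Representation::Smi() above: ia32 V8 stores small integers ("Smis")
// as 31-bit values shifted left one bit with a zero tag bit, so they need
// no heap allocation. A standalone sketch of that encoding (illustration
// only, not V8's Smi class):
#include <cstdint>
#include <cstdio>

static int32_t SmiTag(int32_t value) { return value << 1; }  // tag bit 0
static int32_t SmiUntag(int32_t smi) { return smi >> 1; }

int main() {
  int32_t smi = SmiTag(21);
  std::printf("tagged=%d untagged=%d\n", smi, SmiUntag(smi));  // 42 21
  return 0;
}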
} else if (source->IsDoubleRegister()) {
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(cgen_->masm(), SSE2);
- XMMRegister src = cgen_->ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
- XMMRegister dst = cgen_->ToDoubleRegister(destination);
- __ movaps(dst, src);
- } else {
- ASSERT(destination->IsDoubleStackSlot());
- Operand dst = cgen_->ToOperand(destination);
- __ movsd(dst, src);
- }
+ XMMRegister src = cgen_->ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ XMMRegister dst = cgen_->ToDoubleRegister(destination);
+ __ movaps(dst, src);
} else {
- // load from the register onto the stack, store in destination, which must
- // be a double stack slot in the non-SSE2 case.
- ASSERT(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsDoubleStackSlot());
Operand dst = cgen_->ToOperand(destination);
- X87Register src = cgen_->ToX87Register(source);
- cgen_->X87Mov(dst, src);
+ __ movsd(dst, src);
}
} else if (source->IsDoubleStackSlot()) {
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(cgen_->masm(), SSE2);
- ASSERT(destination->IsDoubleRegister() ||
- destination->IsDoubleStackSlot());
- Operand src = cgen_->ToOperand(source);
- if (destination->IsDoubleRegister()) {
- XMMRegister dst = cgen_->ToDoubleRegister(destination);
- __ movsd(dst, src);
- } else {
- // We rely on having xmm0 available as a fixed scratch register.
- Operand dst = cgen_->ToOperand(destination);
- __ movsd(xmm0, src);
- __ movsd(dst, xmm0);
- }
+ DCHECK(destination->IsDoubleRegister() ||
+ destination->IsDoubleStackSlot());
+ Operand src = cgen_->ToOperand(source);
+ if (destination->IsDoubleRegister()) {
+ XMMRegister dst = cgen_->ToDoubleRegister(destination);
+ __ movsd(dst, src);
} else {
- // load from the stack slot on top of the floating point stack, and then
- // store in destination. If destination is a double register, then it
- // represents the top of the stack and nothing needs to be done.
- if (destination->IsDoubleStackSlot()) {
- Register tmp = EnsureTempRegister();
- Operand src0 = cgen_->ToOperand(source);
- Operand src1 = cgen_->HighOperand(source);
- Operand dst0 = cgen_->ToOperand(destination);
- Operand dst1 = cgen_->HighOperand(destination);
- __ mov(tmp, src0); // Then use tmp to copy source to destination.
- __ mov(dst0, tmp);
- __ mov(tmp, src1);
- __ mov(dst1, tmp);
- } else {
- Operand src = cgen_->ToOperand(source);
- X87Register dst = cgen_->ToX87Register(destination);
- cgen_->X87Mov(dst, src);
- }
+ // We rely on having xmm0 available as a fixed scratch register.
+ Operand dst = cgen_->ToOperand(destination);
+ __ movsd(xmm0, src);
+ __ movsd(dst, xmm0);
}
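// x86 mov cannot take two memory operands, which is why the slot-to-slot
// case above bounces the value through xmm0. A standalone sketch of the
// same pattern using SSE2 intrinsics (illustration only):
#include <emmintrin.h>

// Copy a double between two "stack slots" via an XMM scratch, mirroring
// movsd xmm0, [src]; movsd [dst], xmm0.
static void MoveDoubleSlot(double* dst, const double* src) {
  __m128d scratch = _mm_load_sd(src);
  _mm_store_sd(dst, scratch);
}

int main() {
  double a = 3.14, b = 0.0;
  MoveDoubleSlot(&b, &a);
  return b == a ? 0 : 1;
}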
} else if (source->IsSIMD128Register()) {
- ASSERT(CpuFeatures::IsSupported(SSE2));
- CpuFeatureScope scope(cgen_->masm(), SSE2);
XMMRegister src = cgen_->ToSIMD128Register(source);
if (destination->IsSIMD128Register()) {
__ movaps(cgen_->ToSIMD128Register(destination), src);
} else {
- ASSERT(destination->IsSIMD128StackSlot());
+ DCHECK(destination->IsSIMD128StackSlot());
__ movups(cgen_->ToOperand(destination), src);
}
} else if (source->IsSIMD128StackSlot()) {
- ASSERT(CpuFeatures::IsSupported(SSE2));
- CpuFeatureScope scope(cgen_->masm(), SSE2);
Operand src = cgen_->ToOperand(source);
if (destination->IsSIMD128Register()) {
__ movups(cgen_->ToSIMD128Register(destination), src);
} else {
- ASSERT(destination->IsSIMD128StackSlot());
+ DCHECK(destination->IsSIMD128StackSlot());
__ movups(xmm0, src);
__ movups(cgen_->ToOperand(destination), xmm0);
}
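// The SIMD cases above use movaps only for register-to-register copies and
// movups for anything touching a stack slot, since lithium stack slots are
// not guaranteed to be 16-byte aligned. A standalone intrinsics sketch of
// the aligned/unaligned distinction (illustration only):
#include <xmmintrin.h>

int main() {
  alignas(16) float aligned[4] = {1, 2, 3, 4};
  float buffer[5] = {0};
  __m128 x = _mm_load_ps(aligned);      // movaps: requires 16-byte alignment
  _mm_storeu_ps(buffer + 1, x);         // movups: any alignment is fine
  __m128 y = _mm_loadu_ps(buffer + 1);  // movups load back
  _mm_store_ps(aligned, y);             // aligned store
  return aligned[3] == 4 ? 0 : 1;
}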
__ mov(src, tmp0);
}
} else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
- CpuFeatureScope scope(cgen_->masm(), SSE2);
// XMM register-register swap. We rely on having xmm0
// available as a fixed scratch register.
XMMRegister src = cgen_->ToDoubleRegister(source);
XMMRegister dst = cgen_->ToDoubleRegister(destination);
__ movaps(xmm0, src);
__ movaps(src, dst);
__ movaps(dst, xmm0);
} else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
- CpuFeatureScope scope(cgen_->masm(), SSE2);
// XMM register-memory swap. We rely on having xmm0
// available as a fixed scratch register.
- ASSERT(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot());
+ DCHECK(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot());
XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
? source
: destination);
Operand other = cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
__ movsd(xmm0, other);
__ movsd(other, reg);
__ movaps(reg, xmm0);
} else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
- CpuFeatureScope scope(cgen_->masm(), SSE2);
// Double-width memory-to-memory. Spill on demand to use a general-purpose
// temporary register, and also rely on having xmm0 available as a fixed
// scratch register.
} else if ((source->IsSIMD128StackSlot() &&
destination->IsSIMD128StackSlot())) {
- CpuFeatureScope scope(cgen_->masm(), SSE2);
// Swap two XMM stack slots.
Operand src = cgen_->ToOperand(source);
Operand dst = cgen_->ToOperand(destination);
__ movups(dst, xmm0);
} else if (source->IsSIMD128Register() && destination->IsSIMD128Register()) {
- CpuFeatureScope scope(cgen_->masm(), SSE2);
// Swap two XMM registers.
XMMRegister source_reg = cgen_->ToSIMD128Register(source);
XMMRegister destination_reg = cgen_->ToSIMD128Register(destination);
__ movaps(xmm0, source_reg);
__ movaps(source_reg, destination_reg);
__ movaps(destination_reg, xmm0);
} else if (source->IsSIMD128Register() || destination->IsSIMD128Register()) {
- CpuFeatureScope scope(cgen_->masm(), SSE2);
// Swap an XMM register and an XMM stack slot.
- ASSERT((source->IsSIMD128Register() &&
+ DCHECK((source->IsSIMD128Register() &&
destination->IsSIMD128StackSlot()) ||
(source->IsSIMD128StackSlot() &&
destination->IsSIMD128Register()));
XMMRegister reg = cgen_->ToSIMD128Register(source->IsSIMD128Register()
? source
: destination);
LOperand* other = source->IsSIMD128Register() ? destination : source;
- ASSERT(other->IsSIMD128StackSlot());
+ DCHECK(other->IsSIMD128StackSlot());
Operand other_operand = cgen_->ToOperand(other);
__ movups(xmm0, other_operand);
__ movups(other_operand, reg);
__ movaps(reg, xmm0);
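// The register/slot swap above needs only one scratch because the register
// itself already holds one of the two values. A standalone sketch mirroring
// movups xmm0, [other]; movups [other], reg; movaps reg, xmm0
// (illustration only):
#include <xmmintrin.h>

static void SwapRegWithSlot(__m128* reg, float* mem) {
  __m128 scratch = _mm_loadu_ps(mem);  // movups xmm0, [mem]
  _mm_storeu_ps(mem, *reg);            // movups [mem], reg
  *reg = scratch;                      // movaps reg, xmm0
}

int main() {
  alignas(16) float mem[4] = {1, 2, 3, 4};
  __m128 reg = _mm_set_ps(8, 7, 6, 5);  // lanes {5, 6, 7, 8}
  SwapRegWithSlot(&reg, mem);           // mem = {5,6,7,8}, reg = {1,2,3,4}
  return 0;
}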