// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "cpu-profiler.h"
-#include "assembler-x64.h"
-#include "macro-assembler-x64.h"
-#include "serialize.h"
-#include "debug.h"
-#include "heap.h"
-#include "isolate-inl.h"
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/cpu-profiler.h"
+#include "src/debug.h"
+#include "src/heap/heap.h"
+#include "src/isolate-inl.h"
+#include "src/serialize.h"
+#include "src/x64/assembler-x64.h"
+#include "src/x64/macro-assembler-x64.h"
namespace v8 {
namespace internal {
}
-static const int kInvalidRootRegisterDelta = -1;
+static const int64_t kInvalidRootRegisterDelta = -1;
-intptr_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
+int64_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
if (predictable_code_size() &&
(other.address() < reinterpret_cast<Address>(isolate()) ||
other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
    return kInvalidRootRegisterDelta;
  }
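+  // kRootRegisterBias biases kRootRegister away from the start of the roots
+  // array so that negative displacements can be used as well.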
Address roots_register_value = kRootRegisterBias +
reinterpret_cast<Address>(isolate()->heap()->roots_array_start());
- intptr_t delta = other.address() - roots_register_value;
+
+ int64_t delta = kInvalidRootRegisterDelta; // Bogus initialization.
+ if (kPointerSize == kInt64Size) {
+ delta = other.address() - roots_register_value;
+ } else {
+ // For x32, zero extend the address to 64-bit and calculate the delta.
+ uint64_t o = static_cast<uint32_t>(
+ reinterpret_cast<intptr_t>(other.address()));
+ uint64_t r = static_cast<uint32_t>(
+ reinterpret_cast<intptr_t>(roots_register_value));
+ delta = o - r;
+ }
return delta;
}
Operand MacroAssembler::ExternalOperand(ExternalReference target,
Register scratch) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(target);
+ if (root_array_available_ && !serializer_enabled()) {
+ int64_t delta = RootRegisterDelta(target);
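+    // A memory operand can only encode an int32 displacement, so the
+    // root-register-relative form is used only when the delta fits.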
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
return Operand(kRootRegister, static_cast<int32_t>(delta));
}
}
void MacroAssembler::Load(Register destination, ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(source);
+ if (root_array_available_ && !serializer_enabled()) {
+ int64_t delta = RootRegisterDelta(source);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
return;
}
void MacroAssembler::Store(ExternalReference destination, Register source) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(destination);
+ if (root_array_available_ && !serializer_enabled()) {
+ int64_t delta = RootRegisterDelta(destination);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
return;
}
void MacroAssembler::LoadAddress(Register destination,
ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(source);
+ if (root_array_available_ && !serializer_enabled()) {
+ int64_t delta = RootRegisterDelta(source);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
- lea(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
+ leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
return;
}
}
int MacroAssembler::LoadAddressSize(ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled()) {
+ if (root_array_available_ && !serializer_enabled()) {
// This calculation depends on the internals of LoadAddress.
    // Its correctness is ensured by the asserts in the Call
// instruction below.
- intptr_t delta = RootRegisterDelta(source);
+ int64_t delta = RootRegisterDelta(source);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
- // Operand is lea(scratch, Operand(kRootRegister, delta));
+ // Operand is leap(scratch, Operand(kRootRegister, delta));
// Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
int size = 4;
if (!is_int8(static_cast<int32_t>(delta))) {
void MacroAssembler::PushAddress(ExternalReference source) {
int64_t address = reinterpret_cast<int64_t>(source.address());
- if (is_int32(address) && !Serializer::enabled()) {
+ if (is_int32(address) && !serializer_enabled()) {
if (emit_debug_code()) {
Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
}
- push(Immediate(static_cast<int32_t>(address)));
+ Push(Immediate(static_cast<int32_t>(address)));
return;
}
LoadAddress(kScratchRegister, source);
- push(kScratchRegister);
+ Push(kScratchRegister);
}
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
- ASSERT(root_array_available_);
+ DCHECK(root_array_available_);
movp(destination, Operand(kRootRegister,
(index << kPointerSizeLog2) - kRootRegisterBias));
}
void MacroAssembler::LoadRootIndexed(Register destination,
Register variable_offset,
int fixed_offset) {
- ASSERT(root_array_available_);
+ DCHECK(root_array_available_);
movp(destination,
Operand(kRootRegister,
variable_offset, times_pointer_size,
void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
- ASSERT(root_array_available_);
+ DCHECK(root_array_available_);
movp(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
source);
}
void MacroAssembler::PushRoot(Heap::RootListIndex index) {
- ASSERT(root_array_available_);
- push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
+ DCHECK(root_array_available_);
+ Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
}
void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
- ASSERT(root_array_available_);
- cmpq(with, Operand(kRootRegister,
+ DCHECK(root_array_available_);
+ cmpp(with, Operand(kRootRegister,
(index << kPointerSizeLog2) - kRootRegisterBias));
}
void MacroAssembler::CompareRoot(const Operand& with,
Heap::RootListIndex index) {
- ASSERT(root_array_available_);
- ASSERT(!with.AddressUsesRegister(kScratchRegister));
+ DCHECK(root_array_available_);
+ DCHECK(!with.AddressUsesRegister(kScratchRegister));
LoadRoot(kScratchRegister, index);
- cmpq(with, kScratchRegister);
+ cmpp(with, kScratchRegister);
}
// Store pointer to buffer.
movp(Operand(scratch, 0), addr);
// Increment buffer top.
- addq(scratch, Immediate(kPointerSize));
+ addp(scratch, Immediate(kPointerSize));
// Write back new top of buffer.
StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
// Call stub on end of buffer.
Label done;
// Check for end of buffer.
- testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+ testp(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
if (and_then == kReturnAtEnd) {
Label buffer_overflowed;
j(not_equal, &buffer_overflowed, Label::kNear);
ret(0);
bind(&buffer_overflowed);
} else {
- ASSERT(and_then == kFallThroughAtEnd);
+ DCHECK(and_then == kFallThroughAtEnd);
j(equal, &done, Label::kNear);
}
StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(save_fp);
+ StoreBufferOverflowStub(isolate(), save_fp);
CallStub(&store_buffer_overflow);
if (and_then == kReturnAtEnd) {
ret(0);
} else {
- ASSERT(and_then == kFallThroughAtEnd);
+ DCHECK(and_then == kFallThroughAtEnd);
bind(&done);
}
}
Condition cc,
Label* branch,
Label::Distance distance) {
- if (Serializer::enabled()) {
+ if (serializer_enabled()) {
// Can't do arithmetic on external references if it might get serialized.
// The mask isn't really an address. We load it as an external reference in
// case the size of the new space is different between the snapshot maker
// and the running system.
if (scratch.is(object)) {
Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
- and_(scratch, kScratchRegister);
+ andp(scratch, kScratchRegister);
} else {
Move(scratch, ExternalReference::new_space_mask(isolate()));
- and_(scratch, object);
+ andp(scratch, object);
}
Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
- cmpq(scratch, kScratchRegister);
+ cmpp(scratch, kScratchRegister);
j(cc, branch, distance);
} else {
- ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask())));
+ DCHECK(kPointerSize == kInt64Size
+ ? is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask()))
+ : kPointerSize == kInt32Size);
intptr_t new_space_start =
reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
Assembler::RelocInfoNone());
if (scratch.is(object)) {
- addq(scratch, kScratchRegister);
+ addp(scratch, kScratchRegister);
} else {
- lea(scratch, Operand(object, kScratchRegister, times_1, 0));
+ leap(scratch, Operand(object, kScratchRegister, times_1, 0));
}
- and_(scratch,
+ andp(scratch,
Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
j(cc, branch, distance);
}
Register dst,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
// Although the object register is tagged, the offset is relative to the start
  // of the object, so the offset must be a multiple of kPointerSize.
- ASSERT(IsAligned(offset, kPointerSize));
+ DCHECK(IsAligned(offset, kPointerSize));
- lea(dst, FieldOperand(object, offset));
+ leap(dst, FieldOperand(object, offset));
if (emit_debug_code()) {
Label ok;
testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
bind(&ok);
}
- RecordWrite(
- object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+ RecordWrite(object, dst, value, save_fp, remembered_set_action,
+ OMIT_SMI_CHECK, pointers_to_here_check_for_value);
bind(&done);
}
-void MacroAssembler::RecordWriteArray(Register object,
- Register value,
- Register index,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
+void MacroAssembler::RecordWriteArray(
+ Register object,
+ Register value,
+ Register index,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
// Array access: calculate the destination address. Index is not a smi.
Register dst = index;
- lea(dst, Operand(object, index, times_pointer_size,
+ leap(dst, Operand(object, index, times_pointer_size,
FixedArray::kHeaderSize - kHeapObjectTag));
- RecordWrite(
- object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+ RecordWrite(object, dst, value, save_fp, remembered_set_action,
+ OMIT_SMI_CHECK, pointers_to_here_check_for_value);
bind(&done);
}
-void MacroAssembler::RecordWrite(Register object,
- Register address,
- Register value,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- ASSERT(!object.is(value));
- ASSERT(!object.is(address));
- ASSERT(!value.is(address));
+void MacroAssembler::RecordWriteForMap(Register object,
+ Register map,
+ Register dst,
+ SaveFPRegsMode fp_mode) {
+ DCHECK(!object.is(kScratchRegister));
+ DCHECK(!object.is(map));
+ DCHECK(!object.is(dst));
+ DCHECK(!map.is(dst));
AssertNotSmi(object);
- if (remembered_set_action == OMIT_REMEMBERED_SET &&
- !FLAG_incremental_marking) {
+ if (emit_debug_code()) {
+ Label ok;
+ if (map.is(kScratchRegister)) pushq(map);
+ CompareMap(map, isolate()->factory()->meta_map());
+ if (map.is(kScratchRegister)) popq(map);
+ j(equal, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
+
+ if (!FLAG_incremental_marking) {
return;
}
if (emit_debug_code()) {
Label ok;
- cmpq(value, Operand(address, 0));
+ if (map.is(kScratchRegister)) pushq(map);
+ cmpp(map, FieldOperand(object, HeapObject::kMapOffset));
+ if (map.is(kScratchRegister)) popq(map);
j(equal, &ok, Label::kNear);
int3();
bind(&ok);
}
+ // Compute the address.
+ leap(dst, FieldOperand(object, HeapObject::kMapOffset));
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of smis and stores into the young generation.
+ Label done;
+
+  // A single check of the map's page's interesting flag suffices, since it
+  // is only set during incremental collection, and then it is guaranteed
+  // that the source object's page flag is also set.  This optimization
+  // relies on the fact that maps can never be in new space.
+ CheckPageFlag(map,
+ map, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ zero,
+ &done,
+ Label::kNear);
+
+ RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
+ fp_mode);
+ CallStub(&stub);
+
+ bind(&done);
+
// Count number of write barriers in generated code.
isolate()->counters()->write_barriers_static()->Increment();
IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+ // Clobber clobbered registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ Move(dst, kZapValue, Assembler::RelocInfoNone());
+ Move(map, kZapValue, Assembler::RelocInfoNone());
+ }
+}
+
+
+void MacroAssembler::RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
+ DCHECK(!object.is(value));
+ DCHECK(!object.is(address));
+ DCHECK(!value.is(address));
+ AssertNotSmi(object);
+
+ if (remembered_set_action == OMIT_REMEMBERED_SET &&
+ !FLAG_incremental_marking) {
+ return;
+ }
+
+ if (emit_debug_code()) {
+ Label ok;
+ cmpp(value, Operand(address, 0));
+ j(equal, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
+
// First, check if a write barrier is even needed. The tests below
// catch stores of smis and stores into the young generation.
Label done;
JumpIfSmi(value, &done);
}
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- zero,
- &done,
- Label::kNear);
+ if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ zero,
+ &done,
+ Label::kNear);
+ }
CheckPageFlag(object,
value, // Used as scratch.
&done,
Label::kNear);
- RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+ RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
+ fp_mode);
CallStub(&stub);
bind(&done);
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
void MacroAssembler::CheckStackAlignment() {
- int frame_alignment = OS::ActivationFrameAlignment();
+ int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
- ASSERT(IsPowerOf2(frame_alignment));
+ DCHECK(IsPowerOf2(frame_alignment));
Label alignment_as_expected;
- testq(rsp, Immediate(frame_alignment_mask));
+ testp(rsp, Immediate(frame_alignment_mask));
j(zero, &alignment_as_expected, Label::kNear);
// Abort if stack is not aligned.
int3();
void MacroAssembler::Abort(BailoutReason reason) {
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- const char* msg = GetBailoutReason(reason);
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- // Note: p0 might not be a valid Smi _value_, but it has a valid Smi tag.
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
}
#endif
- push(rax);
- Move(kScratchRegister, reinterpret_cast<Smi*>(p0),
+ Move(kScratchRegister, Smi::FromInt(static_cast<int>(reason)),
Assembler::RelocInfoNone());
- push(kScratchRegister);
- Move(kScratchRegister, Smi::FromInt(static_cast<int>(p1 - p0)),
- Assembler::RelocInfoNone());
- push(kScratchRegister);
+ Push(kScratchRegister);
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
} else {
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
}
// Control will not return here.
int3();
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
- ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
- Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id);
+ DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
void MacroAssembler::TailCallStub(CodeStub* stub) {
- Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
+ Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
void MacroAssembler::StubReturn(int argc) {
- ASSERT(argc >= 1 && generating_stub());
+ DCHECK(argc >= 1 && generating_stub());
ret((argc - 1) * kPointerSize);
}
}
-void MacroAssembler::IllegalOperation(int num_arguments) {
- if (num_arguments > 0) {
- addq(rsp, Immediate(num_arguments * kPointerSize));
- }
- LoadRoot(rax, Heap::kUndefinedValueRootIndex);
-}
-
-
void MacroAssembler::IndexFromHash(Register hash, Register index) {
// The assert checks that the constants for the maximum number of digits
// for an array index cached in the hash field and the number of bits
  // reserved for it do not conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. Even if we subsequently go to
- // the slow case, converting the key to a smi is always valid.
- // key: string key
- // hash: key's hash field, including its array index value.
- and_(hash, Immediate(String::kArrayIndexValueMask));
- shr(hash, Immediate(String::kHashShift));
- // Here we actually clobber the key which will be used if calling into
- // runtime later. However as the new key is the numeric value of a string key
- // there is no difference in using either key.
- Integer32ToSmi(index, hash);
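+  // Decode the cached array index from the hash field and leave it
+  // smi-tagged in the index register.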
+ if (!hash.is(index)) {
+ movl(index, hash);
+ }
+ DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
}
// If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments matches the
// expectation.
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- return;
- }
+ CHECK(f->nargs < 0 || f->nargs == num_arguments);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
Set(rax, num_arguments);
LoadAddress(rbx, ExternalReference(f, isolate()));
- CEntryStub ces(f->result_size, save_doubles);
+ CEntryStub ces(isolate(), f->result_size, save_doubles);
CallStub(&ces);
}
Set(rax, num_arguments);
LoadAddress(rbx, ext);
- CEntryStub stub(1);
+ CEntryStub stub(isolate(), 1);
CallStub(&stub);
}
static int Offset(ExternalReference ref0, ExternalReference ref1) {
int64_t offset = (ref0.address() - ref1.address());
// Check that fits into int.
- ASSERT(static_cast<int>(offset) == offset);
+ DCHECK(static_cast<int>(offset) == offset);
return static_cast<int>(offset);
}
void MacroAssembler::CallApiFunctionAndReturn(
Register function_address,
- Address thunk_address,
+ ExternalReference thunk_ref,
Register thunk_last_arg,
int stack_space,
Operand return_value_operand,
ExternalReference scheduled_exception_address =
ExternalReference::scheduled_exception_address(isolate());
- ASSERT(rdx.is(function_address) || r8.is(function_address));
+ DCHECK(rdx.is(function_address) || r8.is(function_address));
// Allocate HandleScope in callee-save registers.
Register prev_next_address_reg = r14;
Register prev_limit_reg = rbx;
Label profiler_disabled;
Label end_profiler_check;
- bool* is_profiling_flag =
- isolate()->cpu_profiler()->is_profiling_address();
- STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
- Move(rax, is_profiling_flag, RelocInfo::EXTERNAL_REFERENCE);
+ Move(rax, ExternalReference::is_profiling_address(isolate()));
cmpb(Operand(rax, 0), Immediate(0));
j(zero, &profiler_disabled);
// Third parameter is the address of the actual getter function.
Move(thunk_last_arg, function_address);
- Move(rax, thunk_address, RelocInfo::EXTERNAL_REFERENCE);
+ Move(rax, thunk_ref);
jmp(&end_profiler_check);
bind(&profiler_disabled);
// previous handle scope.
subl(Operand(base_reg, kLevelOffset), Immediate(1));
movp(Operand(base_reg, kNextOffset), prev_next_address_reg);
- cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
+ cmpp(prev_limit_reg, Operand(base_reg, kLimitOffset));
j(not_equal, &delete_allocated_handles);
bind(&leave_exit_frame);
int result_size) {
// Set the entry point and jump to the C entry runtime stub.
LoadAddress(rbx, ext);
- CEntryStub ces(result_size);
- jmp(ces.GetCode(isolate()), RelocInfo::CODE_TARGET);
+ CEntryStub ces(isolate(), result_size);
+ jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a builtin without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
// Rely on the assertion to check that the number of provided
  // arguments matches the expected number of arguments. Fake a
  // parameter count to avoid emitting code to do the check.
void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- ASSERT(!target.is(rdi));
+ DCHECK(!target.is(rdi));
// Load the JavaScript builtin function from the builtins object.
GetBuiltinFunction(rdi, id);
movp(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
for (int i = 0; i < kNumberOfSavedRegs; i++) {
Register reg = saved_regs[i];
if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
- push(reg);
+ pushq(reg);
}
}
// R12 to r15 are callee save on all platforms.
if (fp_mode == kSaveFPRegs) {
- subq(rsp, Immediate(kSIMD128Size * XMMRegister::kMaxNumRegisters));
+ subp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movups(Operand(rsp, i * kSIMD128Size), reg);
+ movsd(Operand(rsp, i * kDoubleSize), reg);
}
}
}
if (fp_mode == kSaveFPRegs) {
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movups(reg, Operand(rsp, i * kSIMD128Size));
+ movsd(reg, Operand(rsp, i * kDoubleSize));
}
- addq(rsp, Immediate(kSIMD128Size * XMMRegister::kMaxNumRegisters));
+ addp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
}
for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
Register reg = saved_regs[i];
if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
- pop(reg);
+ popq(reg);
}
}
}
void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
- ASSERT(!r.IsDouble());
+ DCHECK(!r.IsDouble());
if (r.IsInteger8()) {
movsxbq(dst, src);
} else if (r.IsUInteger8()) {
void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
- ASSERT(!r.IsDouble());
+ DCHECK(!r.IsDouble());
if (r.IsInteger8() || r.IsUInteger8()) {
movb(dst, src);
} else if (r.IsInteger16() || r.IsUInteger16()) {
} else if (r.IsInteger32()) {
movl(dst, src);
} else {
+ if (r.IsHeapObject()) {
+ AssertNotSmi(src);
+ } else if (r.IsSmi()) {
+ AssertSmi(src);
+ }
movp(dst, src);
}
}
}
-void MacroAssembler::Set(const Operand& dst, int64_t x) {
- if (is_int32(x)) {
- movq(dst, Immediate(static_cast<int32_t>(x)));
+void MacroAssembler::Set(const Operand& dst, intptr_t x) {
+ if (kPointerSize == kInt64Size) {
+ if (is_int32(x)) {
+ movp(dst, Immediate(static_cast<int32_t>(x)));
+ } else {
+ Set(kScratchRegister, x);
+ movp(dst, kScratchRegister);
+ }
} else {
- Set(kScratchRegister, x);
- movq(dst, kScratchRegister);
+ movp(dst, Immediate(static_cast<int32_t>(x)));
}
}
void MacroAssembler::SafeMove(Register dst, Smi* src) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(SmiValuesAre32Bits()); // JIT cookie can be converted to Smi.
+ DCHECK(!dst.is(kScratchRegister));
if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
- Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
- Move(kScratchRegister, Smi::FromInt(jit_cookie()));
- xor_(dst, kScratchRegister);
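+    // XOR the immediate with the JIT cookie so that the raw value never
+    // appears in the instruction stream (a JIT-spraying mitigation).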
+ if (SmiValuesAre32Bits()) {
+ // JIT cookie can be converted to Smi.
+ Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
+ Move(kScratchRegister, Smi::FromInt(jit_cookie()));
+ xorp(dst, kScratchRegister);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
+ movp(dst, Immediate(value ^ jit_cookie()));
+ xorp(dst, Immediate(jit_cookie()));
+ }
} else {
Move(dst, src);
}
void MacroAssembler::SafePush(Smi* src) {
- ASSERT(SmiValuesAre32Bits()); // JIT cookie can be converted to Smi.
if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
- Push(Smi::FromInt(src->value() ^ jit_cookie()));
- Move(kScratchRegister, Smi::FromInt(jit_cookie()));
- xor_(Operand(rsp, 0), kScratchRegister);
+ if (SmiValuesAre32Bits()) {
+ // JIT cookie can be converted to Smi.
+ Push(Smi::FromInt(src->value() ^ jit_cookie()));
+ Move(kScratchRegister, Smi::FromInt(jit_cookie()));
+ xorp(Operand(rsp, 0), kScratchRegister);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
+ Push(Immediate(value ^ jit_cookie()));
+ xorp(Operand(rsp, 0), Immediate(jit_cookie()));
+ }
} else {
Push(src);
}
if (emit_debug_code()) {
Move(dst, Smi::FromInt(kSmiConstantRegisterValue),
Assembler::RelocInfoNone());
- cmpq(dst, kSmiConstantRegister);
+ cmpp(dst, kSmiConstantRegister);
Assert(equal, kUninitializedKSmiConstantRegister);
}
int value = source->value();
switch (uvalue) {
case 9:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
+ leap(dst,
+ Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
break;
case 8:
xorl(dst, dst);
- lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
+ leap(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
break;
case 4:
xorl(dst, dst);
- lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
+ leap(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
break;
case 5:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
+ leap(dst,
+ Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
break;
case 3:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
+ leap(dst,
+ Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
break;
case 2:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
+ leap(dst,
+ Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
break;
case 1:
movp(dst, kSmiConstantRegister);
return;
}
if (negative) {
- neg(dst);
+ negp(dst);
}
}
if (!dst.is(src)) {
movl(dst, src);
}
- shl(dst, Immediate(kSmiShift));
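+  // Smi tagging is a plain shift: kSmiShift is 32 when smi values occupy the
+  // upper half of a 64-bit word, and 1 when smis are 31 bits wide.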
+ shlp(dst, Immediate(kSmiShift));
}
Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
bind(&ok);
}
- ASSERT(kSmiShift % kBitsPerByte == 0);
- movl(Operand(dst, kSmiShift / kBitsPerByte), src);
+
+ if (SmiValuesAre32Bits()) {
+ DCHECK(kSmiShift % kBitsPerByte == 0);
+ movl(Operand(dst, kSmiShift / kBitsPerByte), src);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ Integer32ToSmi(kScratchRegister, src);
+ movp(dst, kScratchRegister);
+ }
}
} else {
leal(dst, Operand(src, constant));
}
- shl(dst, Immediate(kSmiShift));
+ shlp(dst, Immediate(kSmiShift));
}
if (!dst.is(src)) {
movp(dst, src);
}
- shr(dst, Immediate(kSmiShift));
+
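+  // 32-bit smi values sit in the upper word, so a logical right shift untags
+  // them; 31-bit smis are untagged with an arithmetic shift to keep the sign.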
+ if (SmiValuesAre32Bits()) {
+ shrp(dst, Immediate(kSmiShift));
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ sarl(dst, Immediate(kSmiShift));
+ }
}
void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
- movl(dst, Operand(src, kSmiShift / kBitsPerByte));
+ if (SmiValuesAre32Bits()) {
+ movl(dst, Operand(src, kSmiShift / kBitsPerByte));
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ movl(dst, src);
+ sarl(dst, Immediate(kSmiShift));
+ }
}
if (!dst.is(src)) {
movp(dst, src);
}
- sar(dst, Immediate(kSmiShift));
+ sarp(dst, Immediate(kSmiShift));
+ if (kPointerSize == kInt32Size) {
+ // Sign extend to 64-bit.
+ movsxlq(dst, dst);
+ }
}
void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
- movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
+ if (SmiValuesAre32Bits()) {
+ movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ movp(dst, src);
+ SmiToInteger64(dst, dst);
+ }
}
void MacroAssembler::SmiTest(Register src) {
AssertSmi(src);
- testq(src, src);
+ testp(src, src);
}
void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
AssertSmi(smi1);
AssertSmi(smi2);
- cmpq(smi1, smi2);
+ cmpp(smi1, smi2);
}
void MacroAssembler::Cmp(Register dst, Smi* src) {
- ASSERT(!dst.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
if (src->value() == 0) {
- testq(dst, dst);
+ testp(dst, dst);
} else {
Register constant_reg = GetSmiConstant(src);
- cmpq(dst, constant_reg);
+ cmpp(dst, constant_reg);
}
}
void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
AssertSmi(dst);
AssertSmi(src);
- cmpq(dst, src);
+ cmpp(dst, src);
}
void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
AssertSmi(dst);
AssertSmi(src);
- cmpq(dst, src);
+ cmpp(dst, src);
}
void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
AssertSmi(dst);
- cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
+ if (SmiValuesAre32Bits()) {
+ cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ cmpl(dst, Immediate(src));
+ }
}
void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
// The Operand cannot use the smi register.
Register smi_reg = GetSmiConstant(src);
- ASSERT(!dst.AddressUsesRegister(smi_reg));
- cmpq(dst, smi_reg);
+ DCHECK(!dst.AddressUsesRegister(smi_reg));
+ cmpp(dst, smi_reg);
}
void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
- cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
+ if (SmiValuesAre32Bits()) {
+ cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ SmiToInteger32(kScratchRegister, dst);
+ cmpl(kScratchRegister, src);
+ }
}
void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
Register src,
int power) {
- ASSERT(power >= 0);
- ASSERT(power < 64);
+ DCHECK(power >= 0);
+ DCHECK(power < 64);
if (power == 0) {
SmiToInteger64(dst, src);
return;
movp(dst, src);
}
if (power < kSmiShift) {
- sar(dst, Immediate(kSmiShift - power));
+ sarp(dst, Immediate(kSmiShift - power));
} else if (power > kSmiShift) {
- shl(dst, Immediate(power - kSmiShift));
+ shlp(dst, Immediate(power - kSmiShift));
}
}
void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
Register src,
int power) {
- ASSERT((0 <= power) && (power < 32));
+ DCHECK((0 <= power) && (power < 32));
if (dst.is(src)) {
- shr(dst, Immediate(power + kSmiShift));
+ shrp(dst, Immediate(power + kSmiShift));
} else {
UNIMPLEMENTED(); // Not used.
}
Label* on_not_smis,
Label::Distance near_jump) {
if (dst.is(src1) || dst.is(src2)) {
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
+ DCHECK(!src1.is(kScratchRegister));
+ DCHECK(!src2.is(kScratchRegister));
movp(kScratchRegister, src1);
- or_(kScratchRegister, src2);
+ orp(kScratchRegister, src2);
JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
movp(dst, kScratchRegister);
} else {
movp(dst, src1);
- or_(dst, src2);
+ orp(dst, src2);
JumpIfNotSmi(dst, on_not_smis, near_jump);
}
}
STATIC_ASSERT(kSmiTag == 0);
// Test that both bits of the mask 0x8000000000000001 are zero.
movp(kScratchRegister, src);
- rol(kScratchRegister, Immediate(1));
+ rolp(kScratchRegister, Immediate(1));
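+  // Rotating left by one moves the sign bit into bit 0 and the tag bit into
+  // bit 1, so a single byte test covers both.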
testb(kScratchRegister, Immediate(3));
return zero;
}
return CheckSmi(first);
}
STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
- leal(kScratchRegister, Operand(first, second, times_1, 0));
- testb(kScratchRegister, Immediate(0x03));
+ if (SmiValuesAre32Bits()) {
+ leal(kScratchRegister, Operand(first, second, times_1, 0));
+ testb(kScratchRegister, Immediate(0x03));
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ movl(kScratchRegister, first);
+ orl(kScratchRegister, second);
+ testb(kScratchRegister, Immediate(kSmiTagMask));
+ }
return zero;
}
return CheckNonNegativeSmi(first);
}
movp(kScratchRegister, first);
- or_(kScratchRegister, second);
- rol(kScratchRegister, Immediate(1));
+ orp(kScratchRegister, second);
+ rolp(kScratchRegister, Immediate(1));
testl(kScratchRegister, Immediate(3));
return zero;
}
Condition MacroAssembler::CheckIsMinSmi(Register src) {
- ASSERT(!src.is(kScratchRegister));
+ DCHECK(!src.is(kScratchRegister));
// If we overflow by subtracting one, it's the minimal smi value.
- cmpq(src, kSmiConstantRegister);
+ cmpp(src, kSmiConstantRegister);
return overflow;
}
Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
- // A 32-bit integer value can always be converted to a smi.
- return always;
+ if (SmiValuesAre32Bits()) {
+ // A 32-bit integer value can always be converted to a smi.
+ return always;
+ } else {
+ DCHECK(SmiValuesAre31Bits());
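+    // Subtracting 0xc0000000 maps the valid range [-2^30, 2^30 - 1] onto the
+    // non-negative 32-bit integers, so the sign flag is clear exactly when
+    // the value fits in a 31-bit smi.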
+ cmpl(src, Immediate(0xc0000000));
+ return positive;
+ }
}
Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
- // An unsigned 32-bit integer value is valid as long as the high bit
- // is not set.
- testl(src, src);
- return positive;
+ if (SmiValuesAre32Bits()) {
+ // An unsigned 32-bit integer value is valid as long as the high bit
+ // is not set.
+ testl(src, src);
+ return positive;
+ } else {
+ DCHECK(SmiValuesAre31Bits());
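+    // An unsigned value fits in a 31-bit smi only if its top two bits are
+    // clear, i.e. the value is below 2^30.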
+ testl(src, Immediate(0xc0000000));
+ return zero;
+ }
}
}
+void MacroAssembler::JumpIfValidSmiValue(Register src,
+ Label* on_valid,
+ Label::Distance near_jump) {
+ Condition is_valid = CheckInteger32ValidSmiValue(src);
+ j(is_valid, on_valid, near_jump);
+}
+
+
void MacroAssembler::JumpIfNotValidSmiValue(Register src,
Label* on_invalid,
Label::Distance near_jump) {
}
+void MacroAssembler::JumpIfUIntValidSmiValue(Register src,
+ Label* on_valid,
+ Label::Distance near_jump) {
+ Condition is_valid = CheckUInteger32ValidSmiValue(src);
+ j(is_valid, on_valid, near_jump);
+}
+
+
void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
Label* on_invalid,
Label::Distance near_jump) {
}
return;
} else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
switch (constant->value()) {
case 1:
- addq(dst, kSmiConstantRegister);
+ addp(dst, kSmiConstantRegister);
return;
case 2:
- lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
return;
case 4:
- lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
return;
case 8:
- lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
return;
default:
Register constant_reg = GetSmiConstant(constant);
- addq(dst, constant_reg);
+ addp(dst, constant_reg);
return;
}
} else {
switch (constant->value()) {
case 1:
- lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_1, 0));
return;
case 2:
- lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
return;
case 4:
- lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
return;
case 8:
- lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
return;
default:
LoadSmiConstant(dst, constant);
- addq(dst, src);
+ addp(dst, src);
return;
}
}
void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
if (constant->value() != 0) {
- addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
+ if (SmiValuesAre32Bits()) {
+ addl(Operand(dst, kSmiShift / kBitsPerByte),
+ Immediate(constant->value()));
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ addp(dst, Immediate(constant));
+ }
}
}
movp(dst, src);
}
} else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
LoadSmiConstant(kScratchRegister, constant);
- addq(dst, kScratchRegister);
+ addp(dst, kScratchRegister);
if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
j(no_overflow, bailout_label, near_jump);
- ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
- subq(dst, kScratchRegister);
+ DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
+ subp(dst, kScratchRegister);
} else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
Label done;
j(no_overflow, &done, Label::kNear);
- subq(dst, kScratchRegister);
+ subp(dst, kScratchRegister);
jmp(bailout_label, near_jump);
bind(&done);
} else {
CHECK(mode.IsEmpty());
}
} else {
- ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
- ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
+ DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
+ DCHECK(mode.Contains(BAILOUT_ON_OVERFLOW));
LoadSmiConstant(dst, constant);
- addq(dst, src);
+ addp(dst, src);
j(overflow, bailout_label, near_jump);
}
}
movp(dst, src);
}
} else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- subq(dst, constant_reg);
+ subp(dst, constant_reg);
} else {
if (constant->value() == Smi::kMinValue) {
LoadSmiConstant(dst, constant);
// Adding and subtracting the min-value gives the same result, it only
// differs on the overflow bit, which we don't check here.
- addq(dst, src);
+ addp(dst, src);
} else {
// Subtract by adding the negation.
LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
- addq(dst, src);
+ addp(dst, src);
}
}
}
movp(dst, src);
}
} else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
LoadSmiConstant(kScratchRegister, constant);
- subq(dst, kScratchRegister);
+ subp(dst, kScratchRegister);
if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
j(no_overflow, bailout_label, near_jump);
- ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
- addq(dst, kScratchRegister);
+ DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
+ addp(dst, kScratchRegister);
} else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
Label done;
j(no_overflow, &done, Label::kNear);
- addq(dst, kScratchRegister);
+ addp(dst, kScratchRegister);
jmp(bailout_label, near_jump);
bind(&done);
} else {
CHECK(mode.IsEmpty());
}
} else {
- ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
- ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
+ DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
+ DCHECK(mode.Contains(BAILOUT_ON_OVERFLOW));
if (constant->value() == Smi::kMinValue) {
- ASSERT(!dst.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
movp(dst, src);
LoadSmiConstant(kScratchRegister, constant);
- subq(dst, kScratchRegister);
+ subp(dst, kScratchRegister);
j(overflow, bailout_label, near_jump);
} else {
// Subtract by adding the negation.
LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
- addq(dst, src);
+ addp(dst, src);
j(overflow, bailout_label, near_jump);
}
}
Label* on_smi_result,
Label::Distance near_jump) {
if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
movp(kScratchRegister, src);
- neg(dst); // Low 32 bits are retained as zero by negation.
+ negp(dst); // Low 32 bits are retained as zero by negation.
// Test if result is zero or Smi::kMinValue.
- cmpq(dst, kScratchRegister);
+ cmpp(dst, kScratchRegister);
j(not_equal, on_smi_result, near_jump);
movp(src, kScratchRegister);
} else {
movp(dst, src);
- neg(dst);
- cmpq(dst, src);
+ negp(dst);
+ cmpp(dst, src);
// If the result is zero or Smi::kMinValue, negation failed to create a smi.
j(not_equal, on_smi_result, near_jump);
}
Label::Distance near_jump) {
if (dst.is(src1)) {
Label done;
- masm->addq(dst, src2);
+ masm->addp(dst, src2);
masm->j(no_overflow, &done, Label::kNear);
// Restore src1.
- masm->subq(dst, src2);
+ masm->subp(dst, src2);
masm->jmp(on_not_smi_result, near_jump);
masm->bind(&done);
} else {
masm->movp(dst, src1);
- masm->addq(dst, src2);
+ masm->addp(dst, src2);
masm->j(overflow, on_not_smi_result, near_jump);
}
}
Register src2,
Label* on_not_smi_result,
Label::Distance near_jump) {
- ASSERT_NOT_NULL(on_not_smi_result);
- ASSERT(!dst.is(src2));
+ DCHECK_NOT_NULL(on_not_smi_result);
+ DCHECK(!dst.is(src2));
SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
const Operand& src2,
Label* on_not_smi_result,
Label::Distance near_jump) {
- ASSERT_NOT_NULL(on_not_smi_result);
- ASSERT(!src2.AddressUsesRegister(dst));
+ DCHECK_NOT_NULL(on_not_smi_result);
+ DCHECK(!src2.AddressUsesRegister(dst));
SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
if (!dst.is(src1)) {
if (emit_debug_code()) {
movp(kScratchRegister, src1);
- addq(kScratchRegister, src2);
+ addp(kScratchRegister, src2);
Check(no_overflow, kSmiAdditionOverflow);
}
- lea(dst, Operand(src1, src2, times_1, 0));
+ leap(dst, Operand(src1, src2, times_1, 0));
} else {
- addq(dst, src2);
+ addp(dst, src2);
Assert(no_overflow, kSmiAdditionOverflow);
}
}
Label::Distance near_jump) {
if (dst.is(src1)) {
Label done;
- masm->subq(dst, src2);
+ masm->subp(dst, src2);
masm->j(no_overflow, &done, Label::kNear);
// Restore src1.
- masm->addq(dst, src2);
+ masm->addp(dst, src2);
masm->jmp(on_not_smi_result, near_jump);
masm->bind(&done);
} else {
masm->movp(dst, src1);
- masm->subq(dst, src2);
+ masm->subp(dst, src2);
masm->j(overflow, on_not_smi_result, near_jump);
}
}
Register src2,
Label* on_not_smi_result,
Label::Distance near_jump) {
- ASSERT_NOT_NULL(on_not_smi_result);
- ASSERT(!dst.is(src2));
+ DCHECK_NOT_NULL(on_not_smi_result);
+ DCHECK(!dst.is(src2));
SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
const Operand& src2,
Label* on_not_smi_result,
Label::Distance near_jump) {
- ASSERT_NOT_NULL(on_not_smi_result);
- ASSERT(!src2.AddressUsesRegister(dst));
+ DCHECK_NOT_NULL(on_not_smi_result);
+ DCHECK(!src2.AddressUsesRegister(dst));
SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
if (!dst.is(src1)) {
masm->movp(dst, src1);
}
- masm->subq(dst, src2);
+ masm->subp(dst, src2);
masm->Assert(no_overflow, kSmiSubtractionOverflow);
}
void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
- ASSERT(!dst.is(src2));
+ DCHECK(!dst.is(src2));
SmiSubNoOverflowHelper<Register>(this, dst, src1, src2);
}
Register src2,
Label* on_not_smi_result,
Label::Distance near_jump) {
- ASSERT(!dst.is(src2));
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
+ DCHECK(!dst.is(src2));
+ DCHECK(!dst.is(kScratchRegister));
+ DCHECK(!src1.is(kScratchRegister));
+ DCHECK(!src2.is(kScratchRegister));
if (dst.is(src1)) {
Label failure, zero_correct_result;
movp(kScratchRegister, src1); // Create backup for later testing.
SmiToInteger64(dst, src1);
- imul(dst, src2);
+ imulp(dst, src2);
j(overflow, &failure, Label::kNear);
// Check for negative zero result. If product is zero, and one
// argument is negative, go to slow case.
Label correct_result;
- testq(dst, dst);
+ testp(dst, dst);
j(not_zero, &correct_result, Label::kNear);
movp(dst, kScratchRegister);
- xor_(dst, src2);
+ xorp(dst, src2);
// Result was positive zero.
j(positive, &zero_correct_result, Label::kNear);
bind(&correct_result);
} else {
SmiToInteger64(dst, src1);
- imul(dst, src2);
+ imulp(dst, src2);
j(overflow, on_not_smi_result, near_jump);
// Check for negative zero result. If product is zero, and one
// argument is negative, go to slow case.
Label correct_result;
- testq(dst, dst);
+ testp(dst, dst);
j(not_zero, &correct_result, Label::kNear);
    // One of src1 and src2 is zero, so check whether the other is
// negative.
movp(kScratchRegister, src1);
- xor_(kScratchRegister, src2);
+ xorp(kScratchRegister, src2);
j(negative, on_not_smi_result, near_jump);
bind(&correct_result);
}
Register src2,
Label* on_not_smi_result,
Label::Distance near_jump) {
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src2.is(rax));
- ASSERT(!src2.is(rdx));
- ASSERT(!src1.is(rdx));
+ DCHECK(!src1.is(kScratchRegister));
+ DCHECK(!src2.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
+ DCHECK(!src2.is(rax));
+ DCHECK(!src2.is(rdx));
+ DCHECK(!src1.is(rdx));
// Check for 0 divisor (result is +/-Infinity).
- testq(src2, src2);
+ testp(src2, src2);
j(zero, on_not_smi_result, near_jump);
if (src1.is(rax)) {
// We overshoot a little and go to slow case if we divide min-value
// by any negative value, not just -1.
Label safe_div;
- testl(rax, Immediate(0x7fffffff));
+ testl(rax, Immediate(~Smi::kMinValue));
j(not_zero, &safe_div, Label::kNear);
- testq(src2, src2);
+ testp(src2, src2);
if (src1.is(rax)) {
j(positive, &safe_div, Label::kNear);
movp(src1, kScratchRegister);
Register src2,
Label* on_not_smi_result,
Label::Distance near_jump) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!src2.is(rax));
- ASSERT(!src2.is(rdx));
- ASSERT(!src1.is(rdx));
- ASSERT(!src1.is(src2));
-
- testq(src2, src2);
+ DCHECK(!dst.is(kScratchRegister));
+ DCHECK(!src1.is(kScratchRegister));
+ DCHECK(!src2.is(kScratchRegister));
+ DCHECK(!src2.is(rax));
+ DCHECK(!src2.is(rdx));
+ DCHECK(!src1.is(rdx));
+ DCHECK(!src1.is(src2));
+
+ testp(src2, src2);
j(zero, on_not_smi_result, near_jump);
if (src1.is(rax)) {
Label smi_result;
testl(rdx, rdx);
j(not_zero, &smi_result, Label::kNear);
- testq(src1, src1);
+ testp(src1, src1);
j(negative, on_not_smi_result, near_jump);
bind(&smi_result);
Integer32ToSmi(dst, rdx);
void MacroAssembler::SmiNot(Register dst, Register src) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src.is(kScratchRegister));
- // Set tag and padding bits before negating, so that they are zero afterwards.
- movl(kScratchRegister, Immediate(~0));
+ DCHECK(!dst.is(kScratchRegister));
+ DCHECK(!src.is(kScratchRegister));
+ if (SmiValuesAre32Bits()) {
+ // Set tag and padding bits before negating, so that they are zero
+ // afterwards.
+ movl(kScratchRegister, Immediate(~0));
+ } else {
+ DCHECK(SmiValuesAre31Bits());
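+    // For 31-bit smis only the tag bit needs to be preset, so that the
+    // complement below leaves it zero.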
+ movl(kScratchRegister, Immediate(1));
+ }
if (dst.is(src)) {
- xor_(dst, kScratchRegister);
+ xorp(dst, kScratchRegister);
} else {
- lea(dst, Operand(src, kScratchRegister, times_1, 0));
+ leap(dst, Operand(src, kScratchRegister, times_1, 0));
}
- not_(dst);
+ notp(dst);
}
void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
- ASSERT(!dst.is(src2));
+ DCHECK(!dst.is(src2));
if (!dst.is(src1)) {
movp(dst, src1);
}
- and_(dst, src2);
+ andp(dst, src2);
}
if (constant->value() == 0) {
Set(dst, 0);
} else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- and_(dst, constant_reg);
+ andp(dst, constant_reg);
} else {
LoadSmiConstant(dst, constant);
- and_(dst, src);
+ andp(dst, src);
}
}
void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
- ASSERT(!src1.is(src2));
+ DCHECK(!src1.is(src2));
movp(dst, src1);
}
- or_(dst, src2);
+ orp(dst, src2);
}
void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- or_(dst, constant_reg);
+ orp(dst, constant_reg);
} else {
LoadSmiConstant(dst, constant);
- or_(dst, src);
+ orp(dst, src);
}
}
void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
- ASSERT(!src1.is(src2));
+ DCHECK(!src1.is(src2));
movp(dst, src1);
}
- xor_(dst, src2);
+ xorp(dst, src2);
}
void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- xor_(dst, constant_reg);
+ xorp(dst, constant_reg);
} else {
LoadSmiConstant(dst, constant);
- xor_(dst, src);
+ xorp(dst, src);
}
}
void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
Register src,
int shift_value) {
- ASSERT(is_uint5(shift_value));
+ DCHECK(is_uint5(shift_value));
if (shift_value > 0) {
if (dst.is(src)) {
- sar(dst, Immediate(shift_value + kSmiShift));
- shl(dst, Immediate(kSmiShift));
+ sarp(dst, Immediate(shift_value + kSmiShift));
+ shlp(dst, Immediate(kSmiShift));
} else {
UNIMPLEMENTED(); // Not used.
}
void MacroAssembler::SmiShiftLeftConstant(Register dst,
Register src,
- int shift_value) {
- if (!dst.is(src)) {
- movp(dst, src);
- }
- if (shift_value > 0) {
- shl(dst, Immediate(shift_value));
+ int shift_value,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ if (SmiValuesAre32Bits()) {
+ if (!dst.is(src)) {
+ movp(dst, src);
+ }
+ if (shift_value > 0) {
+      // Only the lower 5 bits of the shift amount are used, not six as the
+      // 64-bit shl opcode would.
+ shlq(dst, Immediate(shift_value & 0x1f));
+ }
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ if (dst.is(src)) {
+ UNIMPLEMENTED(); // Not used.
+ } else {
+ SmiToInteger32(dst, src);
+ shll(dst, Immediate(shift_value));
+ JumpIfNotValidSmiValue(dst, on_not_smi_result, near_jump);
+ Integer32ToSmi(dst, dst);
+ }
}
}
if (dst.is(src)) {
UNIMPLEMENTED(); // Not used.
} else {
- movp(dst, src);
if (shift_value == 0) {
- testq(dst, dst);
+ testp(src, src);
j(negative, on_not_smi_result, near_jump);
}
- shr(dst, Immediate(shift_value + kSmiShift));
- shl(dst, Immediate(kSmiShift));
+ if (SmiValuesAre32Bits()) {
+ movp(dst, src);
+ shrp(dst, Immediate(shift_value + kSmiShift));
+ shlp(dst, Immediate(kSmiShift));
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ SmiToInteger32(dst, src);
+ shrp(dst, Immediate(shift_value));
+ JumpIfUIntNotValidSmiValue(dst, on_not_smi_result, near_jump);
+ Integer32ToSmi(dst, dst);
+ }
}
}
void MacroAssembler::SmiShiftLeft(Register dst,
Register src1,
- Register src2) {
- ASSERT(!dst.is(rcx));
- // Untag shift amount.
- if (!dst.is(src1)) {
- movq(dst, src1);
+ Register src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ if (SmiValuesAre32Bits()) {
+ DCHECK(!dst.is(rcx));
+ if (!dst.is(src1)) {
+ movp(dst, src1);
+ }
+ // Untag shift amount.
+ SmiToInteger32(rcx, src2);
+    // Only the lower 5 bits of the shift amount are used, not six as the
+    // 64-bit shl opcode would.
+ andp(rcx, Immediate(0x1f));
+ shlq_cl(dst);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ DCHECK(!dst.is(kScratchRegister));
+ DCHECK(!src1.is(kScratchRegister));
+ DCHECK(!src2.is(kScratchRegister));
+ DCHECK(!dst.is(src2));
+ DCHECK(!dst.is(rcx));
+
+ if (src1.is(rcx) || src2.is(rcx)) {
+ movq(kScratchRegister, rcx);
+ }
+ if (dst.is(src1)) {
+ UNIMPLEMENTED(); // Not used.
+ } else {
+ Label valid_result;
+ SmiToInteger32(dst, src1);
+ SmiToInteger32(rcx, src2);
+ shll_cl(dst);
+ JumpIfValidSmiValue(dst, &valid_result, Label::kNear);
+      // Neither src1 nor src2 is dst, so clobbering dst leaves them intact;
+      // only rcx may need restoring.
+ if (src1.is(rcx) || src2.is(rcx)) {
+ if (src1.is(rcx)) {
+ movq(src1, kScratchRegister);
+ } else {
+ movq(src2, kScratchRegister);
+ }
+ }
+ jmp(on_not_smi_result, near_jump);
+ bind(&valid_result);
+ Integer32ToSmi(dst, dst);
+ }
}
- SmiToInteger32(rcx, src2);
- // Shift amount specified by lower 5 bits, not six as the shl opcode.
- and_(rcx, Immediate(0x1f));
- shl_cl(dst);
}
Register src2,
Label* on_not_smi_result,
Label::Distance near_jump) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(rcx));
- // dst and src1 can be the same, because the one case that bails out
- // is a shift by 0, which leaves dst, and therefore src1, unchanged.
+ DCHECK(!dst.is(kScratchRegister));
+ DCHECK(!src1.is(kScratchRegister));
+ DCHECK(!src2.is(kScratchRegister));
+ DCHECK(!dst.is(src2));
+ DCHECK(!dst.is(rcx));
if (src1.is(rcx) || src2.is(rcx)) {
movq(kScratchRegister, rcx);
}
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- SmiToInteger32(rcx, src2);
- orl(rcx, Immediate(kSmiShift));
- shr_cl(dst); // Shift is rcx modulo 0x1f + 32.
- shl(dst, Immediate(kSmiShift));
- testq(dst, dst);
- if (src1.is(rcx) || src2.is(rcx)) {
- Label positive_result;
- j(positive, &positive_result, Label::kNear);
- if (src1.is(rcx)) {
- movq(src1, kScratchRegister);
- } else {
- movq(src2, kScratchRegister);
- }
- jmp(on_not_smi_result, near_jump);
- bind(&positive_result);
+ if (dst.is(src1)) {
+ UNIMPLEMENTED(); // Not used.
} else {
- // src2 was zero and src1 negative.
- j(negative, on_not_smi_result, near_jump);
+ Label valid_result;
+ SmiToInteger32(dst, src1);
+ SmiToInteger32(rcx, src2);
+ shrl_cl(dst);
+ JumpIfUIntValidSmiValue(dst, &valid_result, Label::kNear);
+      // Neither src1 nor src2 is dst, so clobbering dst leaves them intact;
+      // only rcx may need restoring.
+ if (src1.is(rcx) || src2.is(rcx)) {
+ if (src1.is(rcx)) {
+ movq(src1, kScratchRegister);
+ } else {
+ movq(src2, kScratchRegister);
+ }
+ }
+ jmp(on_not_smi_result, near_jump);
+ bind(&valid_result);
+ Integer32ToSmi(dst, dst);
}
}
void MacroAssembler::SmiShiftArithmeticRight(Register dst,
Register src1,
Register src2) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(rcx));
- if (src1.is(rcx)) {
- movp(kScratchRegister, src1);
- } else if (src2.is(rcx)) {
- movp(kScratchRegister, src2);
- }
+ DCHECK(!dst.is(kScratchRegister));
+ DCHECK(!src1.is(kScratchRegister));
+ DCHECK(!src2.is(kScratchRegister));
+ DCHECK(!dst.is(rcx));
+
+ SmiToInteger32(rcx, src2);
if (!dst.is(src1)) {
movp(dst, src1);
}
- SmiToInteger32(rcx, src2);
- orl(rcx, Immediate(kSmiShift));
- sar_cl(dst); // Shift 32 + original rcx & 0x1f.
- shl(dst, Immediate(kSmiShift));
- if (src1.is(rcx)) {
- movp(src1, kScratchRegister);
- } else if (src2.is(rcx)) {
- movp(src2, kScratchRegister);
- }
+ SmiToInteger32(dst, dst);
+ sarl_cl(dst);
+ Integer32ToSmi(dst, dst);
}
Register src2,
Label* on_not_smis,
Label::Distance near_jump) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(src1));
- ASSERT(!dst.is(src2));
+ DCHECK(!dst.is(kScratchRegister));
+ DCHECK(!src1.is(kScratchRegister));
+ DCHECK(!src2.is(kScratchRegister));
+ DCHECK(!dst.is(src1));
+ DCHECK(!dst.is(src2));
// Both operands must not be smis.
#ifdef DEBUG
Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
#endif
STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(0, Smi::FromInt(0));
+ DCHECK_EQ(0, Smi::FromInt(0));
movl(kScratchRegister, Immediate(kSmiTagMask));
- and_(kScratchRegister, src1);
+ andp(kScratchRegister, src1);
testl(kScratchRegister, src2);
// If non-zero then both are smis.
j(not_zero, on_not_smis, near_jump);
// Exactly one operand is a smi.
- ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
+ DCHECK_EQ(1, static_cast<int>(kSmiTagMask));
// kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
- subq(kScratchRegister, Immediate(1));
+ subp(kScratchRegister, Immediate(1));
// If src1 is a smi, then scratch register all 1s, else it is all 0s.
movp(dst, src1);
- xor_(dst, src2);
- and_(dst, kScratchRegister);
+ xorp(dst, src2);
+ andp(dst, kScratchRegister);
// If src1 is a smi, dst holds src1 ^ src2, else it is zero.
- xor_(dst, src1);
+ xorp(dst, src1);
// If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
}
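
// The select above is branchless; a hedged C++ sketch, assuming kSmiTag == 0
// and kSmiTagMask == 1, with exactly one of a, b being a smi:
#include <cstdint>
intptr_t SelectNonSmiSketch(intptr_t a, intptr_t b) {
  intptr_t mask = (a & 1) - 1;  // all ones iff a is a smi (tag bit clear)
  return ((a ^ b) & mask) ^ a;  // yields b when a is the smi, else a
}
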
SmiIndex MacroAssembler::SmiToIndex(Register dst,
Register src,
int shift) {
- ASSERT(is_uint6(shift));
- // There is a possible optimization if shift is in the range 60-63, but that
- // will (and must) never happen.
- if (!dst.is(src)) {
- movq(dst, src);
- }
- if (shift < kSmiShift) {
- sar(dst, Immediate(kSmiShift - shift));
+ if (SmiValuesAre32Bits()) {
+ DCHECK(is_uint6(shift));
+ // There is a possible optimization if shift is in the range 60-63, but that
+ // will (and must) never happen.
+ if (!dst.is(src)) {
+ movp(dst, src);
+ }
+ if (shift < kSmiShift) {
+ sarp(dst, Immediate(kSmiShift - shift));
+ } else {
+ shlp(dst, Immediate(shift - kSmiShift));
+ }
+ return SmiIndex(dst, times_1);
} else {
- shl(dst, Immediate(shift - kSmiShift));
+ DCHECK(SmiValuesAre31Bits());
+ DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
+ if (!dst.is(src)) {
+ movp(dst, src);
+ }
+    // We have to sign-extend the index register to 64 bits, as the smi
+    // might be negative.
+ movsxlq(dst, dst);
+ if (shift == times_1) {
+ sarq(dst, Immediate(kSmiShift));
+ return SmiIndex(dst, times_1);
+ }
+ return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
}
- return SmiIndex(dst, times_1);
}
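
// Sketch of the 31-bit-smi branch above: a smi is value << 1, so rather than
// untagging first, the tag bit is folded into the scale factor; the index
// contribution smi * 2^(shift - 1) equals value * 2^shift. Only times_1 has
// no smaller scale, so there the smi is untagged explicitly:
#include <cstdint>
int64_t SmiIndexContribution(int32_t smi, int shift) {
  int64_t sign_extended = smi;                         // movsxlq
  if (shift == 0) return sign_extended >> 1;           // sarq by kSmiShift
  return sign_extended * (int64_t{1} << (shift - 1));  // scale = shift - 1
}
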
+
SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
Register src,
int shift) {
- // Register src holds a positive smi.
- ASSERT(is_uint6(shift));
- if (!dst.is(src)) {
- movq(dst, src);
- }
- neg(dst);
- if (shift < kSmiShift) {
- sar(dst, Immediate(kSmiShift - shift));
+ if (SmiValuesAre32Bits()) {
+ // Register src holds a positive smi.
+ DCHECK(is_uint6(shift));
+ if (!dst.is(src)) {
+ movp(dst, src);
+ }
+ negp(dst);
+ if (shift < kSmiShift) {
+ sarp(dst, Immediate(kSmiShift - shift));
+ } else {
+ shlp(dst, Immediate(shift - kSmiShift));
+ }
+ return SmiIndex(dst, times_1);
} else {
- shl(dst, Immediate(shift - kSmiShift));
+ DCHECK(SmiValuesAre31Bits());
+ DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
+ if (!dst.is(src)) {
+ movp(dst, src);
+ }
+ negq(dst);
+ if (shift == times_1) {
+ sarq(dst, Immediate(kSmiShift));
+ return SmiIndex(dst, times_1);
+ }
+ return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
}
- return SmiIndex(dst, times_1);
}
void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
- ASSERT_EQ(0, kSmiShift % kBitsPerByte);
- addl(dst, Operand(src, kSmiShift / kBitsPerByte));
+ if (SmiValuesAre32Bits()) {
+ DCHECK_EQ(0, kSmiShift % kBitsPerByte);
+ addl(dst, Operand(src, kSmiShift / kBitsPerByte));
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ SmiToInteger32(kScratchRegister, src);
+ addl(dst, kScratchRegister);
+ }
}
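
// Sketch of the 32-bit-smi fast path above (kSmiShift == 32): on
// little-endian x64 the untagged value occupies the upper half of the
// 8-byte field, so the 32-bit word at byte offset kSmiShift / kBitsPerByte
// == 4 is the integer value itself and addl needs no untagging step:
#include <cstdint>
#include <cstring>
int32_t SmiFieldValue32(const void* field) {
  int32_t value;
  memcpy(&value, static_cast<const char*>(field) + 4, sizeof(value));
  return value;
}
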
void MacroAssembler::Push(Smi* source) {
intptr_t smi = reinterpret_cast<intptr_t>(source);
if (is_int32(smi)) {
- push(Immediate(static_cast<int32_t>(smi)));
+ Push(Immediate(static_cast<int32_t>(smi)));
} else {
Register constant = GetSmiConstant(source);
- push(constant);
+ Push(constant);
}
}
-void MacroAssembler::PushInt64AsTwoSmis(Register src, Register scratch) {
+void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
+ DCHECK(!src.is(scratch));
movp(scratch, src);
// High bits.
- shr(src, Immediate(64 - kSmiShift));
- shl(src, Immediate(kSmiShift));
- push(src);
+ shrp(src, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
+ shlp(src, Immediate(kSmiShift));
+ Push(src);
// Low bits.
- shl(scratch, Immediate(kSmiShift));
- push(scratch);
+ shlp(scratch, Immediate(kSmiShift));
+ Push(scratch);
}
-void MacroAssembler::PopInt64AsTwoSmis(Register dst, Register scratch) {
- pop(scratch);
+void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
+ DCHECK(!dst.is(scratch));
+ Pop(scratch);
// Low bits.
- shr(scratch, Immediate(kSmiShift));
- pop(dst);
- shr(dst, Immediate(kSmiShift));
+ shrp(scratch, Immediate(kSmiShift));
+ Pop(dst);
+ shrp(dst, Immediate(kSmiShift));
// High bits.
- shl(dst, Immediate(64 - kSmiShift));
- or_(dst, scratch);
+ shlp(dst, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
+ orp(dst, scratch);
}
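
// Sketch of the split/join performed by the two functions above, assuming
// 64-bit slots with kSmiShift == 32: each half of the raw word is parked in
// the upper 32 bits of a stack slot (i.e. smi-tagged), so the GC never sees
// the raw value as a potential pointer:
#include <cstdint>
void SplitAsTwoSmis(uint64_t raw, uint64_t* high_smi, uint64_t* low_smi) {
  *high_smi = (raw >> 32) << 32;  // shrp then shlp
  *low_smi = raw << 32;           // shlp
}
uint64_t JoinFromTwoSmis(uint64_t high_smi, uint64_t low_smi) {
  return (low_smi >> 32) | ((high_smi >> 32) << 32);  // shrp/shlp/orp
}
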
void MacroAssembler::Test(const Operand& src, Smi* source) {
- testl(Operand(src, kIntSize), Immediate(source->value()));
+ if (SmiValuesAre32Bits()) {
+ testl(Operand(src, kIntSize), Immediate(source->value()));
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ testl(src, Immediate(source));
+ }
}
SmiToInteger32(
mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
shrl(mask, Immediate(1));
- subq(mask, Immediate(1)); // Make mask.
+ subp(mask, Immediate(1)); // Make mask.
// Calculate the entry in the number string cache. The hash value in the
// number string cache for smis is just the smi value, and the hash for
STATIC_ASSERT(8 == kDoubleSize);
movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- and_(scratch, mask);
+ xorp(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ andp(scratch, mask);
// Each entry in the string cache consists of two pointer-sized fields,
// but the times_twice_pointer_size (multiply-by-16) scale factor
// is not supported by the addressing modes on x64.
// So we have to premultiply the entry index before the lookup.
- shl(scratch, Immediate(kPointerSizeLog2 + 1));
+ shlp(scratch, Immediate(kPointerSizeLog2 + 1));
Register index = scratch;
Register probe = mask;
bind(&is_smi);
SmiToInteger32(scratch, object);
- and_(scratch, mask);
+ andp(scratch, mask);
// Each entry in the string cache consists of two pointer-sized fields,
// but the times_twice_pointer_size (multiply-by-16) scale factor
// is not supported by the addressing modes on x64.
// So we have to premultiply the entry index before the lookup.
- shl(scratch, Immediate(kPointerSizeLog2 + 1));
+ shlp(scratch, Immediate(kPointerSizeLog2 + 1));
// Check if the entry is the smi we are looking for.
- cmpq(object,
+ cmpp(object,
FieldOperand(number_string_cache,
index,
times_1,
}
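
// Sketch of the heap-number hash used above: the cache index is the xor of
// the double's two 32-bit halves, masked to the cache size; the index is
// then premultiplied by 2 * kPointerSize (the shlp by kPointerSizeLog2 + 1)
// because x64 addressing modes have no times_16 scale factor:
#include <cstdint>
#include <cstring>
uint32_t NumberCacheHash(double d) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof(bits));
  return static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
}
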
-void MacroAssembler::absps(XMMRegister dst) {
- static const struct V8_ALIGNED(16) {
- uint32_t a;
- uint32_t b;
- uint32_t c;
- uint32_t d;
- } float_absolute_constant =
- { 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF };
- Set(kScratchRegister, reinterpret_cast<intptr_t>(&float_absolute_constant));
- andps(dst, Operand(kScratchRegister, 0));
-}
-
-
-void MacroAssembler::negateps(XMMRegister dst) {
- static const struct V8_ALIGNED(16) {
- uint32_t a;
- uint32_t b;
- uint32_t c;
- uint32_t d;
- } float_negate_constant =
- { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
- Set(kScratchRegister, reinterpret_cast<intptr_t>(&float_negate_constant));
- xorps(dst, Operand(kScratchRegister, 0));
-}
-
-
-void MacroAssembler::notps(XMMRegister dst) {
- static const struct V8_ALIGNED(16) {
- uint32_t a;
- uint32_t b;
- uint32_t c;
- uint32_t d;
- } float_not_constant =
- { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
- Set(kScratchRegister, reinterpret_cast<intptr_t>(&float_not_constant));
- xorps(dst, Operand(kScratchRegister, 0));
-}
-
-
-void MacroAssembler::pnegd(XMMRegister dst) {
- static const struct V8_ALIGNED(16) {
- uint32_t a;
- uint32_t b;
- uint32_t c;
- uint32_t d;
- } int32_one_constant = { 0x1, 0x1, 0x1, 0x1 };
- notps(dst);
- Set(kScratchRegister, reinterpret_cast<intptr_t>(&int32_one_constant));
- paddd(dst, Operand(kScratchRegister, 0));
-}
-
-
-
void MacroAssembler::JumpIfNotString(Register object,
Register object_map,
Label* not_string,
movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
// Check that both are flat ASCII strings.
- ASSERT(kNotStringTag != 0);
+ DCHECK(kNotStringTag != 0);
const int kFlatAsciiStringMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
const int kFlatAsciiStringTag =
andl(scratch1, Immediate(kFlatAsciiStringMask));
andl(scratch2, Immediate(kFlatAsciiStringMask));
// Interleave the bits to check both scratch1 and scratch2 in one test.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ DCHECK_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+ leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
cmpl(scratch1,
Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
j(not_equal, on_fail, near_jump);
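
// Sketch of the single-compare type check above: the DCHECK guarantees
// mask & (mask << 3) == 0, so t1 + (t2 << 3) never carries and the lea can
// pack both masked instance types into one register for a single cmpl:
#include <cstdint>
bool BothFlatAscii(uint32_t t1, uint32_t t2, uint32_t mask, uint32_t tag) {
  return ((t1 & mask) + ((t2 & mask) << 3)) == tag + (tag << 3);
}
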
movp(scratch2, second_object_instance_type);
// Check that both are flat ASCII strings.
- ASSERT(kNotStringTag != 0);
+ DCHECK(kNotStringTag != 0);
const int kFlatAsciiStringMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
const int kFlatAsciiStringTag =
andl(scratch1, Immediate(kFlatAsciiStringMask));
andl(scratch2, Immediate(kFlatAsciiStringMask));
// Interleave the bits to check both scratch1 and scratch2 in one test.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ DCHECK_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+ leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
cmpl(scratch1,
Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
j(not_equal, on_fail, near_jump);
Cmp(dst, Smi::cast(*source));
} else {
MoveHeapObject(kScratchRegister, source);
- cmpq(dst, kScratchRegister);
+ cmpp(dst, kScratchRegister);
}
}
Cmp(dst, Smi::cast(*source));
} else {
MoveHeapObject(kScratchRegister, source);
- cmpq(dst, kScratchRegister);
+ cmpp(dst, kScratchRegister);
}
}
Push(Smi::cast(*source));
} else {
MoveHeapObject(kScratchRegister, source);
- push(kScratchRegister);
+ Push(kScratchRegister);
}
}
void MacroAssembler::MoveHeapObject(Register result,
Handle<Object> object) {
AllowDeferredHandleDereference using_raw_address;
- ASSERT(object->IsHeapObject());
+ DCHECK(object->IsHeapObject());
if (isolate()->heap()->InNewSpace(*object)) {
Handle<Cell> cell = isolate()->factory()->NewCell(object);
Move(result, cell, RelocInfo::CELL);
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
- addq(rsp, Immediate(stack_elements * kPointerSize));
+ addp(rsp, Immediate(stack_elements * kPointerSize));
+ }
+}
+
+
+void MacroAssembler::DropUnderReturnAddress(int stack_elements,
+ Register scratch) {
+ DCHECK(stack_elements > 0);
+ if (kPointerSize == kInt64Size && stack_elements == 1) {
+ popq(MemOperand(rsp, 0));
+ return;
+ }
+
+ PopReturnAddressTo(scratch);
+ Drop(stack_elements);
+ PushReturnAddressFrom(scratch);
+}
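
// Sketch of the popq(MemOperand(rsp, 0)) fast path above: x86 computes the
// destination address of a pop *after* incrementing rsp, so the instruction
// reloads the return address and stores it one slot higher, dropping the
// word beneath it in a single instruction:
#include <cstdint>
void PopIntoOwnSlot(uint64_t** rsp) {
  uint64_t return_address = **rsp;  // load from the old top of stack
  *rsp += 1;                        // rsp += 8
  **rsp = return_address;           // store to the new top of stack
}
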
+
+
+void MacroAssembler::Push(Register src) {
+ if (kPointerSize == kInt64Size) {
+ pushq(src);
+ } else {
+ // x32 uses 64-bit push for rbp in the prologue.
+ DCHECK(src.code() != rbp.code());
+ leal(rsp, Operand(rsp, -4));
+ movp(Operand(rsp, 0), src);
+ }
+}
+
+
+void MacroAssembler::Push(const Operand& src) {
+ if (kPointerSize == kInt64Size) {
+ pushq(src);
+ } else {
+ movp(kScratchRegister, src);
+ leal(rsp, Operand(rsp, -4));
+ movp(Operand(rsp, 0), kScratchRegister);
+ }
+}
+
+
+void MacroAssembler::PushQuad(const Operand& src) {
+ if (kPointerSize == kInt64Size) {
+ pushq(src);
+ } else {
+ movp(kScratchRegister, src);
+ pushq(kScratchRegister);
+ }
+}
+
+
+void MacroAssembler::Push(Immediate value) {
+ if (kPointerSize == kInt64Size) {
+ pushq(value);
+ } else {
+ leal(rsp, Operand(rsp, -4));
+ movp(Operand(rsp, 0), value);
+ }
+}
+
+
+void MacroAssembler::PushImm32(int32_t imm32) {
+ if (kPointerSize == kInt64Size) {
+ pushq_imm32(imm32);
+ } else {
+ leal(rsp, Operand(rsp, -4));
+ movp(Operand(rsp, 0), Immediate(imm32));
+ }
+}
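
// Sketch of the x32 emulation used by the Push variants above: the hardware
// pushq always moves rsp by 8, but x32 pointer slots are 4 bytes, so a push
// becomes an explicit 4-byte rsp adjustment followed by a store:
#include <cstdint>
void Push32(uint32_t** rsp, uint32_t value) {
  *rsp -= 1;      // leal rsp, [rsp - 4]
  **rsp = value;  // movp [rsp], value
}
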
+
+
+void MacroAssembler::Pop(Register dst) {
+ if (kPointerSize == kInt64Size) {
+ popq(dst);
+ } else {
+ // x32 uses 64-bit pop for rbp in the epilogue.
+ DCHECK(dst.code() != rbp.code());
+ movp(dst, Operand(rsp, 0));
+ leal(rsp, Operand(rsp, 4));
+ }
+}
+
+
+void MacroAssembler::Pop(const Operand& dst) {
+ if (kPointerSize == kInt64Size) {
+ popq(dst);
+ } else {
+ Register scratch = dst.AddressUsesRegister(kScratchRegister)
+ ? kSmiConstantRegister : kScratchRegister;
+ movp(scratch, Operand(rsp, 0));
+ movp(dst, scratch);
+ leal(rsp, Operand(rsp, 4));
+ if (scratch.is(kSmiConstantRegister)) {
+ // Restore kSmiConstantRegister.
+ movp(kSmiConstantRegister,
+ reinterpret_cast<void*>(Smi::FromInt(kSmiConstantRegisterValue)),
+ Assembler::RelocInfoNone());
+ }
+ }
+}
+
+
+void MacroAssembler::PopQuad(const Operand& dst) {
+ if (kPointerSize == kInt64Size) {
+ popq(dst);
+ } else {
+ popq(kScratchRegister);
+ movp(dst, kScratchRegister);
}
}
-void MacroAssembler::TestBit(const Operand& src, int bits) {
+void MacroAssembler::LoadSharedFunctionInfoSpecialField(Register dst,
+ Register base,
+ int offset) {
+ DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
+ offset <= SharedFunctionInfo::kSize &&
+ (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
+ if (kPointerSize == kInt64Size) {
+ movsxlq(dst, FieldOperand(base, offset));
+ } else {
+ movp(dst, FieldOperand(base, offset));
+ SmiToInteger32(dst, dst);
+ }
+}
+
+
+void MacroAssembler::TestBitSharedFunctionInfoSpecialField(Register base,
+ int offset,
+ int bits) {
+ DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
+ offset <= SharedFunctionInfo::kSize &&
+ (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
+ if (kPointerSize == kInt32Size) {
+    // On x32, this field is represented as a smi.
+ bits += kSmiShift;
+ }
int byte_offset = bits / kBitsPerByte;
int bit_in_byte = bits & (kBitsPerByte - 1);
- testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte));
+ testb(FieldOperand(base, offset + byte_offset), Immediate(1 << bit_in_byte));
}
}
+void MacroAssembler::Jump(const Operand& op) {
+ if (kPointerSize == kInt64Size) {
+ jmp(op);
+ } else {
+ movp(kScratchRegister, op);
+ jmp(kScratchRegister);
+ }
+}
+
+
void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
Move(kScratchRegister, destination, rmode);
jmp(kScratchRegister);
}
+void MacroAssembler::Call(const Operand& op) {
+ if (kPointerSize == kInt64Size) {
+ call(op);
+ } else {
+ movp(kScratchRegister, op);
+ call(kScratchRegister);
+ }
+}
+
+
void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
#ifdef DEBUG
int end_position = pc_offset() + CallSize(destination);
#ifdef DEBUG
int end_position = pc_offset() + CallSize(code_object);
#endif
- ASSERT(RelocInfo::IsCodeTarget(rmode) ||
+ DCHECK(RelocInfo::IsCodeTarget(rmode) ||
rmode == RelocInfo::CODE_AGE_SEQUENCE);
call(code_object, rmode, ast_id);
#ifdef DEBUG
void MacroAssembler::Pushad() {
- push(rax);
- push(rcx);
- push(rdx);
- push(rbx);
+ Push(rax);
+ Push(rcx);
+ Push(rdx);
+ Push(rbx);
// Not pushing rsp or rbp.
- push(rsi);
- push(rdi);
- push(r8);
- push(r9);
+ Push(rsi);
+ Push(rdi);
+ Push(r8);
+ Push(r9);
// r10 is kScratchRegister.
- push(r11);
+ Push(r11);
// r12 is kSmiConstantRegister.
// r13 is kRootRegister.
- push(r14);
- push(r15);
+ Push(r14);
+ Push(r15);
STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
// Use lea for symmetry with Popad.
int sp_delta =
(kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
- lea(rsp, Operand(rsp, -sp_delta));
+ leap(rsp, Operand(rsp, -sp_delta));
}
// Popad must not change the flags, so use lea instead of addq.
int sp_delta =
(kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
- lea(rsp, Operand(rsp, sp_delta));
- pop(r15);
- pop(r14);
- pop(r11);
- pop(r9);
- pop(r8);
- pop(rdi);
- pop(rsi);
- pop(rbx);
- pop(rdx);
- pop(rcx);
- pop(rax);
+ leap(rsp, Operand(rsp, sp_delta));
+ Pop(r15);
+ Pop(r14);
+ Pop(r11);
+ Pop(r9);
+ Pop(r8);
+ Pop(rdi);
+ Pop(rsi);
+ Pop(rbx);
+ Pop(rdx);
+ Pop(rcx);
+ Pop(rax);
}
void MacroAssembler::Dropad() {
- addq(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
+ addp(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
}
// The frame pointer does not point to a JS frame so we save NULL for
// rbp. We expect the code throwing an exception to check rbp before
// dereferencing it to restore the context.
- push(Immediate(0)); // NULL frame pointer.
+ pushq(Immediate(0)); // NULL frame pointer.
Push(Smi::FromInt(0)); // No context.
} else {
- push(rbp);
- push(rsi);
+ pushq(rbp);
+ Push(rsi);
}
// Push the state and the code object.
unsigned state =
StackHandler::IndexField::encode(handler_index) |
StackHandler::KindField::encode(kind);
- push(Immediate(state));
+ Push(Immediate(state));
Push(CodeObject());
// Link the current handler as the next handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- push(ExternalOperand(handler_address));
+ Push(ExternalOperand(handler_address));
// Set this new handler as the current one.
movp(ExternalOperand(handler_address), rsp);
}
void MacroAssembler::PopTryHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- pop(ExternalOperand(handler_address));
- addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+ Pop(ExternalOperand(handler_address));
+ addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
}
// a fixed array of (smi-tagged) code offsets.
// rax = exception, rdi = code object, rdx = state.
movp(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
- shr(rdx, Immediate(StackHandler::kKindWidth));
+ shrp(rdx, Immediate(StackHandler::kKindWidth));
movp(rdx,
FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
SmiToInteger64(rdx, rdx);
- lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
+ leap(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
jmp(rdi);
}
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
movp(rsp, ExternalOperand(handler_address));
// Restore the next handler.
- pop(ExternalOperand(handler_address));
+ Pop(ExternalOperand(handler_address));
// Remove the code object and state, compute the handler address in rdi.
- pop(rdi); // Code object.
- pop(rdx); // Offset and state.
+ Pop(rdi); // Code object.
+ Pop(rdx); // Offset and state.
// Restore the context and frame pointer.
- pop(rsi); // Context.
- pop(rbp); // Frame pointer.
+ Pop(rsi); // Context.
+ popq(rbp); // Frame pointer.
// If the handler is a JS frame, restore the context to the frame.
// (kind == ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
// rbp or rsi.
Label skip;
- testq(rsi, rsi);
+ testp(rsi, rsi);
j(zero, &skip, Label::kNear);
movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
bind(&skip);
j(not_zero, &fetch_next);
// Set the top handler address to next handler past the top ENTRY handler.
- pop(ExternalOperand(handler_address));
+ Pop(ExternalOperand(handler_address));
// Remove the code object and state, compute the handler address in rdi.
- pop(rdi); // Code object.
- pop(rdx); // Offset and state.
+ Pop(rdi); // Code object.
+ Pop(rdx); // Offset and state.
// Clear the context pointer and frame pointer (0 was saved in the handler).
- pop(rsi);
- pop(rbp);
+ Pop(rsi);
+ popq(rbp);
JumpToHandlerEntry();
}
ret(bytes_dropped);
} else {
PopReturnAddressTo(scratch);
- addq(rsp, Immediate(bytes_dropped));
+ addp(rsp, Immediate(bytes_dropped));
PushReturnAddressFrom(scratch);
ret(0);
}
cvtsd2si(result_reg, input_reg);
testl(result_reg, Immediate(0xFFFFFF00));
j(zero, &done, Label::kNear);
- cmpl(result_reg, Immediate(0x80000000));
- j(equal, &conv_failure, Label::kNear);
+ cmpl(result_reg, Immediate(1));
+ j(overflow, &conv_failure, Label::kNear);
movl(result_reg, Immediate(0));
- setcc(above, result_reg);
+ setcc(sign, result_reg);
subl(result_reg, Immediate(1));
andl(result_reg, Immediate(255));
jmp(&done, Label::kNear);
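
// Sketch of the cmp-against-1 idiom introduced above: cvtsd2si (and the
// 64-bit cvttsd2siq used below) return INT_MIN on failure, and reg - 1
// overflows in the signed sense only when reg == INT_MIN, so a j(overflow)
// after cmp(reg, 1) detects failure without loading a 0x80000000 constant
// (sketch assumes the GCC/Clang __builtin_sub_overflow builtin):
#include <cstdint>
bool ConversionFailed(int32_t reg) {
  int32_t unused;
  return __builtin_sub_overflow(reg, 1, &unused);  // true iff reg == INT_MIN
}
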
void MacroAssembler::LoadUint32(XMMRegister dst,
- Register src,
- XMMRegister scratch) {
+ Register src) {
if (FLAG_debug_code) {
cmpq(src, Immediate(0xffffffff));
Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared);
void MacroAssembler::SlowTruncateToI(Register result_reg,
Register input_reg,
int offset) {
- DoubleToIStub stub(input_reg, result_reg, offset, true);
- call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+ DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true);
+ call(stub.GetCode(), RelocInfo::CODE_TARGET);
}
Label done;
movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
cvttsd2siq(result_reg, xmm0);
- Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
- cmpq(result_reg, kScratchRegister);
- j(not_equal, &done, Label::kNear);
+ cmpq(result_reg, Immediate(1));
+ j(no_overflow, &done, Label::kNear);
// Slow case.
if (input_reg.is(result_reg)) {
- subq(rsp, Immediate(kDoubleSize));
+ subp(rsp, Immediate(kDoubleSize));
movsd(MemOperand(rsp, 0), xmm0);
SlowTruncateToI(result_reg, rsp, 0);
- addq(rsp, Immediate(kDoubleSize));
+ addp(rsp, Immediate(kDoubleSize));
} else {
SlowTruncateToI(result_reg, input_reg);
}
bind(&done);
+ // Keep our invariant that the upper 32 bits are zero.
+ movl(result_reg, result_reg);
}
XMMRegister input_reg) {
Label done;
cvttsd2siq(result_reg, input_reg);
- movq(kScratchRegister, V8_INT64_C(0x8000000000000000));
- cmpq(result_reg, kScratchRegister);
- j(not_equal, &done, Label::kNear);
+ cmpq(result_reg, Immediate(1));
+ j(no_overflow, &done, Label::kNear);
- subq(rsp, Immediate(kDoubleSize));
+ subp(rsp, Immediate(kDoubleSize));
movsd(MemOperand(rsp, 0), input_reg);
SlowTruncateToI(result_reg, rsp, 0);
- addq(rsp, Immediate(kDoubleSize));
+ addp(rsp, Immediate(kDoubleSize));
bind(&done);
+ // Keep our invariant that the upper 32 bits are zero.
+ movl(result_reg, result_reg);
}
Label* lost_precision,
Label::Distance dst) {
Label done;
- ASSERT(!temp.is(xmm0));
+ DCHECK(!temp.is(xmm0));
// Heap number map check.
CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
}
-void MacroAssembler::Throw(BailoutReason reason) {
-#ifdef DEBUG
- const char* msg = GetBailoutReason(reason);
- if (msg != NULL) {
- RecordComment("Throw message: ");
- RecordComment(msg);
- }
-#endif
-
- push(rax);
- Push(Smi::FromInt(reason));
- if (!has_frame_) {
- // We don't actually want to generate a pile of code for this, so just
- // claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kThrowMessage, 1);
- } else {
- CallRuntime(Runtime::kThrowMessage, 1);
- }
- // Control will not return here.
- int3();
-}
-
-
-void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
- Label L;
- j(NegateCondition(cc), &L);
- Throw(reason);
- // will not return here
- bind(&L);
-}
-
-
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
movp(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
- movp(dst, FieldOperand(map, Map::kBitField3Offset));
+ movl(dst, FieldOperand(map, Map::kBitField3Offset));
DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}
void MacroAssembler::EnumLength(Register dst, Register map) {
STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
- movp(dst, FieldOperand(map, Map::kBitField3Offset));
- Move(kScratchRegister, Smi::FromInt(Map::EnumLengthBits::kMask));
- and_(dst, kScratchRegister);
+ movl(dst, FieldOperand(map, Map::kBitField3Offset));
+ andl(dst, Immediate(Map::EnumLengthBits::kMask));
+ Integer32ToSmi(dst, dst);
}
void MacroAssembler::AssertZeroExtended(Register int32_register) {
if (emit_debug_code()) {
- ASSERT(!int32_register.is(kScratchRegister));
+ DCHECK(!int32_register.is(kScratchRegister));
movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
cmpq(kScratchRegister, int32_register);
Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
if (emit_debug_code()) {
testb(object, Immediate(kSmiTagMask));
Check(not_equal, kOperandIsASmiAndNotAString);
- push(object);
+ Push(object);
movp(object, FieldOperand(object, HeapObject::kMapOffset));
CmpInstanceType(object, FIRST_NONSTRING_TYPE);
- pop(object);
+ Pop(object);
Check(below, kOperandIsNotAString);
}
}
if (emit_debug_code()) {
testb(object, Immediate(kSmiTagMask));
Check(not_equal, kOperandIsASmiAndNotAName);
- push(object);
+ Push(object);
movp(object, FieldOperand(object, HeapObject::kMapOffset));
CmpInstanceType(object, LAST_NAME_TYPE);
- pop(object);
+ Pop(object);
Check(below_equal, kOperandIsNotAName);
}
}
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ Cmp(object, isolate()->factory()->undefined_value());
+ j(equal, &done_checking);
+ Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
+ Assert(equal, kExpectedUndefinedOrCell);
+ bind(&done_checking);
+ }
+}
+
+
void MacroAssembler::AssertRootValue(Register src,
Heap::RootListIndex root_value_index,
BailoutReason reason) {
if (emit_debug_code()) {
- ASSERT(!src.is(kScratchRegister));
+ DCHECK(!src.is(kScratchRegister));
LoadRoot(kScratchRegister, root_value_index);
- cmpq(src, kScratchRegister);
+ cmpp(src, kScratchRegister);
Check(equal, reason);
}
}
Register result,
Label* miss,
bool miss_on_bound_function) {
- // Check that the receiver isn't a smi.
- testl(function, Immediate(kSmiTagMask));
- j(zero, miss);
+ Label non_instance;
+ if (miss_on_bound_function) {
+ // Check that the receiver isn't a smi.
+ testl(function, Immediate(kSmiTagMask));
+ j(zero, miss);
- // Check that the function really is a function.
- CmpObjectType(function, JS_FUNCTION_TYPE, result);
- j(not_equal, miss);
+ // Check that the function really is a function.
+ CmpObjectType(function, JS_FUNCTION_TYPE, result);
+ j(not_equal, miss);
- if (miss_on_bound_function) {
movp(kScratchRegister,
FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
// It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
// field).
- TestBit(FieldOperand(kScratchRegister,
- SharedFunctionInfo::kCompilerHintsOffset),
- SharedFunctionInfo::kBoundFunction);
+ TestBitSharedFunctionInfoSpecialField(kScratchRegister,
+ SharedFunctionInfo::kCompilerHintsOffset,
+ SharedFunctionInfo::kBoundFunction);
j(not_zero, miss);
- }
- // Make sure that the function has an instance prototype.
- Label non_instance;
- testb(FieldOperand(result, Map::kBitFieldOffset),
- Immediate(1 << Map::kHasNonInstancePrototype));
- j(not_zero, &non_instance, Label::kNear);
+ // Make sure that the function has an instance prototype.
+ testb(FieldOperand(result, Map::kBitFieldOffset),
+ Immediate(1 << Map::kHasNonInstancePrototype));
+ j(not_zero, &non_instance, Label::kNear);
+ }
// Get the prototype or initial map from the function.
movp(result,
// Get the prototype from the initial map.
movp(result, FieldOperand(result, Map::kPrototypeOffset));
- jmp(&done, Label::kNear);
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- bind(&non_instance);
- movp(result, FieldOperand(result, Map::kConstructorOffset));
+ if (miss_on_bound_function) {
+ jmp(&done, Label::kNear);
+
+ // Non-instance prototype: Fetch prototype from constructor field
+ // in initial map.
+ bind(&non_instance);
+ movp(result, FieldOperand(result, Map::kConstructorOffset));
+ }
// All done.
bind(&done);
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
- ASSERT(value > 0);
+ DCHECK(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
Operand counter_operand = ExternalOperand(ExternalReference(counter));
if (value == 1) {
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
- ASSERT(value > 0);
+ DCHECK(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
Operand counter_operand = ExternalOperand(ExternalReference(counter));
if (value == 1) {
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
Set(rax, 0); // No arguments.
LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
- CEntryStub ces(1);
- ASSERT(AllowThisStubCall(&ces));
- Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
+ CEntryStub ces(isolate(), 1);
+ DCHECK(AllowThisStubCall(&ces));
+ Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
-#endif // ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::InvokeCode(Register code,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
Label done;
bool definitely_mismatches = false;
call(code);
call_wrapper.AfterCall();
} else {
- ASSERT(flag == JUMP_FUNCTION);
+ DCHECK(flag == JUMP_FUNCTION);
jmp(code);
}
bind(&done);
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
- ASSERT(function.is(rdi));
+ DCHECK(function.is(rdi));
movp(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
- movsxlq(rbx,
- FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
+ LoadSharedFunctionInfoSpecialField(rbx, rdx,
+ SharedFunctionInfo::kFormalParameterCountOffset);
// Advances rdx to the end of the Code object header, to the start of
// the executable code.
movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
- ASSERT(function.is(rdi));
+ DCHECK(function.is(rdi));
movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
// Advances rdx to the end of the Code object header, to the start of
// the executable code.
*definitely_mismatches = false;
Label invoke;
if (expected.is_immediate()) {
- ASSERT(actual.is_immediate());
+ DCHECK(actual.is_immediate());
if (expected.immediate() == actual.immediate()) {
definitely_matches = true;
} else {
// Expected is in register, actual is immediate. This is the
// case when we invoke function values without going through the
// IC mechanism.
- cmpq(expected.reg(), Immediate(actual.immediate()));
+ cmpp(expected.reg(), Immediate(actual.immediate()));
j(equal, &invoke, Label::kNear);
- ASSERT(expected.reg().is(rbx));
+ DCHECK(expected.reg().is(rbx));
Set(rax, actual.immediate());
} else if (!expected.reg().is(actual.reg())) {
// Both expected and actual are in (different) registers. This
// is the case when we invoke functions using call and apply.
- cmpq(expected.reg(), actual.reg());
+ cmpp(expected.reg(), actual.reg());
j(equal, &invoke, Label::kNear);
- ASSERT(actual.reg().is(rax));
- ASSERT(expected.reg().is(rbx));
+ DCHECK(actual.reg().is(rax));
+ DCHECK(expected.reg().is(rbx));
}
}
Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (!code_constant.is_null()) {
Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
- addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
} else if (!code_register.is(rdx)) {
movp(rdx, code_register);
}
}
-void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
- if (frame_mode == BUILD_STUB_FRAME) {
- push(rbp); // Caller's frame pointer.
+void MacroAssembler::StubPrologue() {
+ pushq(rbp); // Caller's frame pointer.
movp(rbp, rsp);
- push(rsi); // Callee's context.
+ Push(rsi); // Callee's context.
Push(Smi::FromInt(StackFrame::STUB));
+}
+
+
+void MacroAssembler::Prologue(bool code_pre_aging) {
+  PredictableCodeSizeScope predictable_code_size_scope(this,
+ kNoCodeAgeSequenceLength);
+ if (code_pre_aging) {
+ // Pre-age the code.
+ Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
+ RelocInfo::CODE_AGE_SEQUENCE);
+ Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
} else {
- PredictableCodeSizeScope predictible_code_size_scope(this,
- kNoCodeAgeSequenceLength);
- if (isolate()->IsCodePreAgingActive()) {
- // Pre-age the code.
- Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
- RelocInfo::CODE_AGE_SEQUENCE);
- Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
- } else {
- push(rbp); // Caller's frame pointer.
- movp(rbp, rsp);
- push(rsi); // Callee's context.
- push(rdi); // Callee's JS function.
- }
+ pushq(rbp); // Caller's frame pointer.
+ movp(rbp, rsp);
+ Push(rsi); // Callee's context.
+ Push(rdi); // Callee's JS function.
}
}
void MacroAssembler::EnterFrame(StackFrame::Type type) {
- push(rbp);
+ pushq(rbp);
movp(rbp, rsp);
- push(rsi); // Context.
+ Push(rsi); // Context.
Push(Smi::FromInt(type));
Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
- push(kScratchRegister);
+ Push(kScratchRegister);
if (emit_debug_code()) {
Move(kScratchRegister,
isolate()->factory()->undefined_value(),
RelocInfo::EMBEDDED_OBJECT);
- cmpq(Operand(rsp, 0), kScratchRegister);
+ cmpp(Operand(rsp, 0), kScratchRegister);
Check(not_equal, kCodeObjectNotProperlyPatched);
}
}
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
if (emit_debug_code()) {
Move(kScratchRegister, Smi::FromInt(type));
- cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
+ cmpp(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
Check(equal, kStackFrameTypesMustMatch);
}
movp(rsp, rbp);
- pop(rbp);
+ popq(rbp);
}
void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
// Set up the frame structure on the stack.
// All constants are relative to the frame pointer of the exit frame.
- ASSERT(ExitFrameConstants::kCallerSPDisplacement ==
+ DCHECK(ExitFrameConstants::kCallerSPDisplacement ==
kFPOnStackSize + kPCOnStackSize);
- ASSERT(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
- ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
- push(rbp);
+ DCHECK(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
+ DCHECK(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
+ pushq(rbp);
movp(rbp, rsp);
// Reserve room for entry stack pointer and push the code object.
- ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
- push(Immediate(0)); // Saved entry sp, patched before call.
+ DCHECK(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
+ Push(Immediate(0)); // Saved entry sp, patched before call.
Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
- push(kScratchRegister); // Accessed from EditFrame::code_slot.
+  Push(kScratchRegister);  // Accessed from ExitFrame::code_slot.
// Save the frame pointer and the context in top.
if (save_rax) {
#endif
// Optionally save all XMM registers.
if (save_doubles) {
- int space = XMMRegister::kMaxNumAllocatableRegisters * kSIMD128Size +
+ int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize +
arg_stack_space * kRegisterSize;
- subq(rsp, Immediate(space));
+ subp(rsp, Immediate(space));
int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
XMMRegister reg = XMMRegister::FromAllocationIndex(i);
- movups(Operand(rbp, offset - ((i + 1) * kSIMD128Size)), reg);
+ movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else if (arg_stack_space > 0) {
- subq(rsp, Immediate(arg_stack_space * kRegisterSize));
+ subp(rsp, Immediate(arg_stack_space * kRegisterSize));
}
// Get the required frame alignment for the OS.
- const int kFrameAlignment = OS::ActivationFrameAlignment();
+ const int kFrameAlignment = base::OS::ActivationFrameAlignment();
if (kFrameAlignment > 0) {
- ASSERT(IsPowerOf2(kFrameAlignment));
- ASSERT(is_int8(kFrameAlignment));
- and_(rsp, Immediate(-kFrameAlignment));
+ DCHECK(IsPowerOf2(kFrameAlignment));
+ DCHECK(is_int8(kFrameAlignment));
+ andp(rsp, Immediate(-kFrameAlignment));
}
// Patch the saved entry sp.
// Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
// so it must be retained across the C-call.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
- lea(r15, Operand(rbp, r14, times_pointer_size, offset));
+ leap(r15, Operand(rbp, r14, times_pointer_size, offset));
EnterExitFrameEpilogue(arg_stack_space, save_doubles);
}
int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
XMMRegister reg = XMMRegister::FromAllocationIndex(i);
- movups(reg, Operand(rbp, offset - ((i + 1) * kSIMD128Size)));
+ movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
}
}
// Get the return address from the stack and restore the frame pointer.
// Drop everything up to and including the arguments and the receiver
// from the caller stack.
- lea(rsp, Operand(r15, 1 * kPointerSize));
+ leap(rsp, Operand(r15, 1 * kPointerSize));
PushReturnAddressFrom(rcx);
void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
movp(rsp, rbp);
- pop(rbp);
+ popq(rbp);
LeaveExitFrameEpilogue(restore_context);
}
Label* miss) {
Label same_contexts;
- ASSERT(!holder_reg.is(scratch));
- ASSERT(!scratch.is(kScratchRegister));
+ DCHECK(!holder_reg.is(scratch));
+ DCHECK(!scratch.is(kScratchRegister));
// Load current lexical context from the stack frame.
movp(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
// When generating debug code, make sure the lexical context is set.
if (emit_debug_code()) {
- cmpq(scratch, Immediate(0));
+ cmpp(scratch, Immediate(0));
Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
}
// Load the native context of the current context.
}
// Check if both contexts are the same.
- cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
+ cmpp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
j(equal, &same_contexts);
// Compare security tokens.
// Check the context is a native context.
if (emit_debug_code()) {
// Preserve original value of holder_reg.
- push(holder_reg);
+ Push(holder_reg);
movp(holder_reg,
FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
CompareRoot(holder_reg, Heap::kNullValueRootIndex);
movp(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
- pop(holder_reg);
+ Pop(holder_reg);
}
movp(kScratchRegister,
int token_offset =
Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
movp(scratch, FieldOperand(scratch, token_offset));
- cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
+ cmpp(scratch, FieldOperand(kScratchRegister, token_offset));
j(not_equal, miss);
bind(&same_contexts);
// Compute the hash code from the untagged key. This must be kept in sync with
-// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
+// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stubs-hydrogen.cc
void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
// First of all we assign the hash seed to scratch.
if (i > 0) {
addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
}
- and_(r2, r1);
+ andp(r2, r1);
// Scale the index by multiplying by the entry size.
- ASSERT(SeededNumberDictionary::kEntrySize == 3);
- lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
+ DCHECK(SeededNumberDictionary::kEntrySize == 3);
+ leap(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
// Check if the key matches.
- cmpq(key, FieldOperand(elements,
+ cmpp(key, FieldOperand(elements,
r2,
times_pointer_size,
SeededNumberDictionary::kElementsStartOffset));
// Check that the value is a normal property.
const int kDetailsOffset =
SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- ASSERT_EQ(NORMAL, 0);
+ DCHECK_EQ(NORMAL, 0);
Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
Smi::FromInt(PropertyDetails::TypeField::kMask));
j(not_zero, miss);
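
// Sketch of the probe scaling above: each SeededNumberDictionary entry is
// three words wide, so the lea computes slot * 3 and the times_pointer_size
// addressing mode supplies the remaining * kPointerSize; probe_offset is
// assumed to follow the dictionary's GetProbeOffset sequence:
#include <cstdint>
uint32_t ProbeWordIndex(uint32_t hash, uint32_t probe_offset,
                        uint32_t capacity_mask) {
  uint32_t slot = (hash + probe_offset) & capacity_mask;  // andp(r2, r1)
  return slot * 3;  // leap(r2, Operand(r2, r2, times_2, 0))
}
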
// Just return if allocation top is already known.
if ((flags & RESULT_CONTAINS_TOP) != 0) {
// No use of scratch if allocation top is provided.
- ASSERT(!scratch.is_valid());
+ DCHECK(!scratch.is_valid());
#ifdef DEBUG
// Assert that result actually contains top on entry.
Operand top_operand = ExternalOperand(allocation_top);
- cmpq(result, top_operand);
+ cmpp(result, top_operand);
Check(equal, kUnexpectedAllocationTop);
#endif
return;
}
+void MacroAssembler::MakeSureDoubleAlignedHelper(Register result,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
+ if (kPointerSize == kDoubleSize) {
+ if (FLAG_debug_code) {
+ testl(result, Immediate(kDoubleAlignmentMask));
+ Check(zero, kAllocationIsNotDoubleAligned);
+ }
+ } else {
+ // Align the next allocation. Storing the filler map without checking top
+ // is safe in new-space because the limit of the heap is aligned there.
+ DCHECK(kPointerSize * 2 == kDoubleSize);
+ DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+ DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+ // Make sure scratch is not clobbered by this function as it might be
+ // used in UpdateAllocationTopHelper later.
+ DCHECK(!scratch.is(kScratchRegister));
+ Label aligned;
+ testl(result, Immediate(kDoubleAlignmentMask));
+ j(zero, &aligned, Label::kNear);
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+ cmpp(result, ExternalOperand(allocation_limit));
+ j(above_equal, gc_required);
+ }
+ LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex);
+ movp(Operand(result, 0), kScratchRegister);
+ addp(result, Immediate(kDoubleSize / 2));
+ bind(&aligned);
+ }
+}
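
// Sketch of the x32 path above (4-byte pointers, 8-byte doubles): when the
// allocation top is only word aligned, a one-pointer filler object is
// planted so the heap stays iterable, and top advances by half a double:
#include <cstdint>
uintptr_t AlignTopForDouble(uintptr_t top, uintptr_t filler_map) {
  if (top & 7) {                                      // kDoubleAlignmentMask
    *reinterpret_cast<uintptr_t*>(top) = filler_map;  // one-pointer filler
    top += 4;                                         // kDoubleSize / 2
  }
  return top;
}
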
+
+
void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
Register scratch,
AllocationFlags flags) {
if (emit_debug_code()) {
- testq(result_end, Immediate(kObjectAlignmentMask));
+ testp(result_end, Immediate(kObjectAlignmentMask));
Check(zero, kUnalignedAllocationInNewSpace);
}
Register scratch,
Label* gc_required,
AllocationFlags flags) {
- ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
- ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+ DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
jmp(gc_required);
return;
}
- ASSERT(!result.is(result_end));
+ DCHECK(!result.is(result_end));
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
- // Align the next allocation. Storing the filler map without checking top is
- // safe in new-space because the limit of the heap is aligned there.
- if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
- testq(result, Immediate(kDoubleAlignmentMask));
- Check(zero, kAllocationIsNotDoubleAligned);
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
}
// Calculate new top and bail out if new space is exhausted.
if (!top_reg.is(result)) {
movp(top_reg, result);
}
- addq(top_reg, Immediate(object_size));
+ addp(top_reg, Immediate(object_size));
j(carry, gc_required);
Operand limit_operand = ExternalOperand(allocation_limit);
- cmpq(top_reg, limit_operand);
+ cmpp(top_reg, limit_operand);
j(above, gc_required);
// Update allocation top.
bool tag_result = (flags & TAG_OBJECT) != 0;
if (top_reg.is(result)) {
if (tag_result) {
- subq(result, Immediate(object_size - kHeapObjectTag));
+ subp(result, Immediate(object_size - kHeapObjectTag));
} else {
- subq(result, Immediate(object_size));
+ subp(result, Immediate(object_size));
}
} else if (tag_result) {
// Tag the result if requested.
- ASSERT(kHeapObjectTag == 1);
- incq(result);
+ DCHECK(kHeapObjectTag == 1);
+ incp(result);
}
}
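
// Sketch of the bump-pointer allocation above: the j(carry) after the addp
// guards against unsigned wraparound of top + size, and the cmpp against the
// limit catches space exhaustion; both branch to gc_required:
#include <cstdint>
bool BumpAllocate(uintptr_t* top, uintptr_t limit, uintptr_t size,
                  uintptr_t* result) {
  uintptr_t new_top = *top + size;
  if (new_top < *top) return false;   // carry: address space wrapped
  if (new_top > limit) return false;  // cmpp/j(above): space exhausted
  *result = *top;
  *top = new_top;
  return true;
}
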
Register scratch,
Label* gc_required,
AllocationFlags flags) {
- ASSERT((flags & SIZE_IN_WORDS) == 0);
- lea(result_end, Operand(element_count, element_size, header_size));
+ DCHECK((flags & SIZE_IN_WORDS) == 0);
+ leap(result_end, Operand(element_count, element_size, header_size));
Allocate(result_end, result, result_end, scratch, gc_required, flags);
}
Register scratch,
Label* gc_required,
AllocationFlags flags) {
- ASSERT((flags & SIZE_IN_WORDS) == 0);
+ DCHECK((flags & SIZE_IN_WORDS) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
jmp(gc_required);
return;
}
- ASSERT(!result.is(result_end));
+ DCHECK(!result.is(result_end));
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
- // Align the next allocation. Storing the filler map without checking top is
- // safe in new-space because the limit of the heap is aligned there.
- if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
- testq(result, Immediate(kDoubleAlignmentMask));
- Check(zero, kAllocationIsNotDoubleAligned);
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
}
// Calculate new top and bail out if new space is exhausted.
if (!object_size.is(result_end)) {
movp(result_end, object_size);
}
- addq(result_end, result);
+ addp(result_end, result);
j(carry, gc_required);
Operand limit_operand = ExternalOperand(allocation_limit);
- cmpq(result_end, limit_operand);
+ cmpp(result_end, limit_operand);
j(above, gc_required);
// Update allocation top.
// Tag the result if requested.
if ((flags & TAG_OBJECT) != 0) {
- addq(result, Immediate(kHeapObjectTag));
+ addp(result, Immediate(kHeapObjectTag));
}
}
ExternalReference::new_space_allocation_top_address(isolate());
// Make sure the object has no tag before resetting top.
- and_(object, Immediate(~kHeapObjectTagMask));
+ andp(object, Immediate(~kHeapObjectTagMask));
Operand top_operand = ExternalOperand(new_space_allocation_top);
#ifdef DEBUG
- cmpq(object, top_operand);
+ cmpp(object, top_operand);
Check(below, kUndoAllocationOfNonAllocatedMemory);
#endif
movp(top_operand, object);
void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch,
- Label* gc_required) {
+ Label* gc_required,
+ MutableMode mode) {
// Allocate heap number in new space.
Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
- // Set the map.
- LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
- movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-}
-
-
-void MacroAssembler::AllocateSIMDHeapObject(int size,
- Register result,
- Register scratch,
- Label* gc_required,
- Heap::RootListIndex map_index) {
- Allocate(size, result, scratch, no_reg, gc_required, TAG_OBJECT);
+ Heap::RootListIndex map_index = mode == MUTABLE
+ ? Heap::kMutableHeapNumberMapRootIndex
+ : Heap::kHeapNumberMapRootIndex;
// Set the map.
LoadRoot(kScratchRegister, map_index);
// observing object alignment.
const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
kObjectAlignmentMask;
- ASSERT(kShortSize == 2);
+ DCHECK(kShortSize == 2);
// scratch1 = length * 2 + kObjectAlignmentMask.
- lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
+ leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
kHeaderAlignment));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
+ andp(scratch1, Immediate(~kObjectAlignmentMask));
if (kHeaderAlignment > 0) {
- subq(scratch1, Immediate(kHeaderAlignment));
+ subp(scratch1, Immediate(kHeaderAlignment));
}
// Allocate two byte string in new space.
const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
kObjectAlignmentMask;
movl(scratch1, length);
- ASSERT(kCharSize == 1);
- addq(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
+ DCHECK(kCharSize == 1);
+ addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
+ andp(scratch1, Immediate(~kObjectAlignmentMask));
if (kHeaderAlignment > 0) {
- subq(scratch1, Immediate(kHeaderAlignment));
+ subp(scratch1, Immediate(kHeaderAlignment));
}
// Allocate ASCII string in new space.
Register scratch1,
Register scratch2,
Label* gc_required) {
- Label allocate_new_space, install_map;
- AllocationFlags flags = TAG_OBJECT;
-
- ExternalReference high_promotion_mode = ExternalReference::
- new_space_high_promotion_mode_active_address(isolate());
-
- Load(scratch1, high_promotion_mode);
- testb(scratch1, Immediate(1));
- j(zero, &allocate_new_space);
Allocate(ConsString::kSize,
result,
scratch1,
scratch2,
gc_required,
- static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
-
- jmp(&install_map);
-
- bind(&allocate_new_space);
- Allocate(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- flags);
-
- bind(&install_map);
+ TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
Register length,
int min_length,
Register scratch) {
- ASSERT(min_length >= 0);
+ DCHECK(min_length >= 0);
if (emit_debug_code()) {
cmpl(length, Immediate(min_length));
Assert(greater_equal, kInvalidMinLength);
j(below, &short_string, Label::kNear);
}
- ASSERT(source.is(rsi));
- ASSERT(destination.is(rdi));
- ASSERT(length.is(rcx));
+ DCHECK(source.is(rsi));
+ DCHECK(destination.is(rdi));
+ DCHECK(length.is(rcx));
if (min_length <= kLongStringLimit) {
cmpl(length, Immediate(2 * kPointerSize));
// at the end of the ranges.
movp(scratch, length);
shrl(length, Immediate(kPointerSizeLog2));
- repmovsq();
+ repmovsp();
// Move remaining bytes of length.
andl(scratch, Immediate(kPointerSize - 1));
movp(length, Operand(source, scratch, times_1, -kPointerSize));
movp(Operand(destination, scratch, times_1, -kPointerSize), length);
- addq(destination, scratch);
+ addp(destination, scratch);
if (min_length <= kLongStringLimit) {
jmp(&done, Label::kNear);
// Move remaining bytes of length.
movp(scratch, Operand(source, length, times_1, -kPointerSize));
movp(Operand(destination, length, times_1, -kPointerSize), scratch);
- addq(destination, length);
+ addp(destination, length);
jmp(&done, Label::kNear);
bind(&short_string);
bind(&short_loop);
movb(scratch, Operand(source, 0));
movb(Operand(destination, 0), scratch);
- incq(source);
- incq(destination);
+ incp(source);
+ incp(destination);
decl(length);
j(not_zero, &short_loop);
}
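
// Sketch of the CopyBytes tail trick above, assuming non-overlapping buffers
// and n >= kPointerSize: after rep movs copies the whole words, the final
// (possibly misaligned) word is copied with one load/store pair that
// deliberately overlaps bytes already copied, avoiding a byte loop:
#include <cstddef>
#include <cstdint>
#include <cstring>
void CopyBytesSketch(char* dst, const char* src, size_t n) {
  size_t words = n / 8;
  memcpy(dst, src, words * 8);      // the repmovsp part
  if (n % 8 != 0) {
    uint64_t last;
    memcpy(&last, src + n - 8, 8);  // overlapping 8-byte load
    memcpy(dst + n - 8, &last, 8);  // overlapping 8-byte store
  }
}
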
jmp(&entry);
bind(&loop);
movp(Operand(start_offset, 0), filler);
- addq(start_offset, Immediate(kPointerSize));
+ addp(start_offset, Immediate(kPointerSize));
bind(&entry);
- cmpq(start_offset, end_offset);
+ cmpp(start_offset, end_offset);
j(less, &loop);
}
int offset = expected_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
- cmpq(map_in_out, FieldOperand(scratch, offset));
+ cmpp(map_in_out, FieldOperand(scratch, offset));
j(not_equal, no_map_match);
// Use the transitioned cached map.
}
-void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
- ASSERT(!function_in.is(map_out));
- Label done;
- movp(map_out, FieldOperand(function_in,
- JSFunction::kPrototypeOrInitialMapOffset));
- if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
- map_out,
- scratch,
- &done);
- }
- bind(&done);
-}
-
#ifdef _WIN64
static const int kRegisterPassedArguments = 4;
#else
}
-void MacroAssembler::LoadArrayFunction(Register function) {
- movp(function,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movp(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
- movp(function,
- Operand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
-}
-
-
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map) {
// Load the initial map. The global functions all have initial maps.
// arguments.
// On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
// and the caller does not reserve stack slots for them.
- ASSERT(num_arguments >= 0);
+ DCHECK(num_arguments >= 0);
#ifdef _WIN64
const int kMinimumStackSlots = kRegisterPassedArguments;
if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
Abort(kNonObject);
bind(&is_object);
- push(value);
+ Push(value);
movp(value, FieldOperand(string, HeapObject::kMapOffset));
- movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
+ movzxbp(value, FieldOperand(value, Map::kInstanceTypeOffset));
andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
- cmpq(value, Immediate(encoding_mask));
- pop(value);
+ cmpp(value, Immediate(encoding_mask));
+ Pop(value);
Check(equal, kUnexpectedStringType);
// The index is assumed to be untagged coming in, tag it to compare with the
void MacroAssembler::PrepareCallCFunction(int num_arguments) {
- int frame_alignment = OS::ActivationFrameAlignment();
- ASSERT(frame_alignment != 0);
- ASSERT(num_arguments >= 0);
+ int frame_alignment = base::OS::ActivationFrameAlignment();
+ DCHECK(frame_alignment != 0);
+ DCHECK(num_arguments >= 0);
// Make stack end at alignment and allocate space for arguments and old rsp.
movp(kScratchRegister, rsp);
- ASSERT(IsPowerOf2(frame_alignment));
+ DCHECK(IsPowerOf2(frame_alignment));
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
- subq(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
- and_(rsp, Immediate(-frame_alignment));
+ subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
+ andp(rsp, Immediate(-frame_alignment));
movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
}
void MacroAssembler::CallCFunction(Register function, int num_arguments) {
- ASSERT(has_frame());
+ DCHECK(has_frame());
// Check stack alignment.
if (emit_debug_code()) {
CheckStackAlignment();
}
call(function);
- ASSERT(OS::ActivationFrameAlignment() != 0);
- ASSERT(num_arguments >= 0);
+ DCHECK(base::OS::ActivationFrameAlignment() != 0);
+ DCHECK(num_arguments >= 0);
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
}
-bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
- if (r1.is(r2)) return true;
- if (r1.is(r3)) return true;
- if (r1.is(r4)) return true;
- if (r2.is(r3)) return true;
- if (r2.is(r4)) return true;
- if (r3.is(r4)) return true;
- return false;
+#ifdef DEBUG
+bool AreAliased(Register reg1,
+ Register reg2,
+ Register reg3,
+ Register reg4,
+ Register reg5,
+ Register reg6,
+ Register reg7,
+ Register reg8) {
+ int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
+ reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
+ reg7.is_valid() + reg8.is_valid();
+
+ RegList regs = 0;
+ if (reg1.is_valid()) regs |= reg1.bit();
+ if (reg2.is_valid()) regs |= reg2.bit();
+ if (reg3.is_valid()) regs |= reg3.bit();
+ if (reg4.is_valid()) regs |= reg4.bit();
+ if (reg5.is_valid()) regs |= reg5.bit();
+ if (reg6.is_valid()) regs |= reg6.bit();
+ if (reg7.is_valid()) regs |= reg7.bit();
+ if (reg8.is_valid()) regs |= reg8.bit();
+ int n_of_non_aliasing_regs = NumRegs(regs);
+
+ return n_of_valid_regs != n_of_non_aliasing_regs;
}
+#endif
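
// Sketch of the aliasing check above: each valid register ORs one bit into a
// RegList-style mask, so duplicates collapse and aliasing shows up as a
// popcount smaller than the number of valid registers (assumes GCC/Clang
// __builtin_popcount; negative codes stand in for no_reg):
#include <cstdint>
#include <initializer_list>
bool AnyAliased(std::initializer_list<int> reg_codes) {
  uint32_t mask = 0;
  int valid = 0;
  for (int code : reg_codes) {
    if (code < 0) continue;
    ++valid;
    mask |= 1u << code;
  }
  return __builtin_popcount(mask) != valid;
}
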
CodePatcher::CodePatcher(byte* address, int size)
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+ DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
- CPU::FlushICache(address_, size_);
+ CpuFeatures::FlushICache(address_, size_);
// Check that the code was patched as expected.
- ASSERT(masm_.pc_ == address_ + size_);
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+ DCHECK(masm_.pc_ == address_ + size_);
+ DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
Condition cc,
Label* condition_met,
Label::Distance condition_met_distance) {
- ASSERT(cc == zero || cc == not_zero);
+ DCHECK(cc == zero || cc == not_zero);
if (scratch.is(object)) {
- and_(scratch, Immediate(~Page::kPageAlignmentMask));
+ andp(scratch, Immediate(~Page::kPageAlignmentMask));
} else {
movp(scratch, Immediate(~Page::kPageAlignmentMask));
- and_(scratch, object);
+ andp(scratch, object);
}
if (mask < (1 << kBitsPerByte)) {
testb(Operand(scratch, MemoryChunk::kFlagsOffset),
Label* if_deprecated) {
if (map->CanBeDeprecated()) {
Move(scratch, map);
- movp(scratch, FieldOperand(scratch, Map::kBitField3Offset));
- SmiToInteger32(scratch, scratch);
- and_(scratch, Immediate(Map::Deprecated::kMask));
+ movl(scratch, FieldOperand(scratch, Map::kBitField3Offset));
+ andl(scratch, Immediate(Map::Deprecated::kMask));
j(not_zero, if_deprecated);
}
}
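+// Hedged note on the change above: dropping the SmiToInteger32 step assumes
+// Map::kBitField3 is now stored as a raw uint32 rather than a Smi, so a
+// 32-bit movl plus andl reads the Deprecated bit directly.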
Register mask_scratch,
Label* on_black,
Label::Distance on_black_distance) {
- ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
+ DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
GetMarkBits(object, bitmap_scratch, mask_scratch);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
// The mask_scratch register contains a 1 at the position of the first bit
// and a 0 at all other positions, including the position of the second bit.
movp(rcx, mask_scratch);
// Make rcx into a mask that covers both marking bits using the operation
// rcx = mask | (mask << 1).
- lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
+ leap(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
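+ // Hedged note: with a single-bit mask, Operand(mask_scratch, mask_scratch,
+ // times_2, 0) computes mask + 2 * mask == 3 * mask, which equals
+ // mask | (mask << 1), e.g. 0b00100 -> 0b01100.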
// Note that we are using a 4-byte aligned 8-byte load.
- and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- cmpq(mask_scratch, rcx);
+ andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ cmpp(mask_scratch, rcx);
j(equal, on_black, on_black_distance);
}
movp(scratch, FieldOperand(value, HeapObject::kMapOffset));
CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
j(equal, &is_data_object, Label::kNear);
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
// If it's a string and it's not a cons string then it's an object containing
// no GC pointers.
testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
void MacroAssembler::GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg) {
- ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
+ DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
movp(bitmap_reg, addr_reg);
// Sign-extended 32-bit immediate.
- and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
+ andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
movp(rcx, addr_reg);
int shift =
Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
shrl(rcx, Immediate(shift));
- and_(rcx,
+ andp(rcx,
Immediate((Page::kPageAlignmentMask >> shift) &
~(Bitmap::kBytesPerCell - 1)));
- addq(bitmap_reg, rcx);
+ addp(bitmap_reg, rcx);
movp(rcx, addr_reg);
shrl(rcx, Immediate(kPointerSizeLog2));
- and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
+ andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
movl(mask_reg, Immediate(1));
- shl_cl(mask_reg);
+ shlp_cl(mask_reg);
}
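+// A hedged sketch of the arithmetic above, assuming 8-byte pointers
+// (kPointerSizeLog2 == 3) and 32-bit bitmap cells (kBitsPerCellLog2 == 5,
+// kBytesPerCellLog2 == 2), so shift == 3 + 5 - 2 == 6:
+//   word             = (addr & Page::kPageAlignmentMask) >> 3;  // 1 bit/word
+//   cell_byte_offset = (word >> 5) << 2;       // == what the shift computes
+//   mask             = 1 << (word & 31);       // bit within the 32-bit cell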
Register mask_scratch,
Label* value_is_white_and_not_data,
Label::Distance distance) {
- ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
+ DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
- ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
- ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+ DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
Label done;
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there, we only need to check one bit.
- testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
j(not_zero, &done, Label::kNear);
if (emit_debug_code()) {
// Check for impossible bit pattern.
Label ok;
- push(mask_scratch);
+ Push(mask_scratch);
// Shift the mask left by one by adding it to itself; the shift may
// overflow, making the check conservative.
- addq(mask_scratch, mask_scratch);
- testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ addp(mask_scratch, mask_scratch);
+ testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
j(zero, &ok, Label::kNear);
int3();
bind(&ok);
- pop(mask_scratch);
+ Pop(mask_scratch);
}
// Value is white. We check whether it is data that doesn't need scanning.
bind(&not_heap_number);
// Check for strings.
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
// If it's a string and it's not a cons string then it's an object containing
// no GC pointers.
Register instance_type = rcx;
Label not_external;
// External strings are the only ones with the kExternalStringTag bit
// set.
- ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
- ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
+ DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
testb(instance_type, Immediate(kExternalStringTag));
j(zero, &not_external, Label::kNear);
movp(length, Immediate(ExternalString::kSize));
bind(&not_external);
// Sequential string, either ASCII or UC16.
- ASSERT(kOneByteStringTag == 0x04);
- and_(length, Immediate(kStringEncodingMask));
- xor_(length, Immediate(kStringEncodingMask));
- addq(length, Immediate(0x04));
+ DCHECK(kOneByteStringTag == 0x04);
+ andp(length, Immediate(kStringEncodingMask));
+ xorp(length, Immediate(kStringEncodingMask));
+ addp(length, Immediate(0x04));
// Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
- imul(length, FieldOperand(value, String::kLengthOffset));
- shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
- addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
- and_(length, Immediate(~kObjectAlignmentMask));
+ imulp(length, FieldOperand(value, String::kLengthOffset));
+ shrp(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
+ addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
+ andp(length, Immediate(~kObjectAlignmentMask));
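+// Worked example of the size computation above (hedged; assumes the x64 Smi
+// encoding with kSmiTagSize + kSmiShiftSize == 32 and 8-byte object
+// alignment). For a 10-character ASCII string:
+//   length = 4;                       // char size (1 byte) shifted left by 2
+//   length *= 10 << 32;               // Smi-tagged string length
+//   length >>= 2 + 32;                // -> 10 character bytes
+//   length = (10 + SeqString::kHeaderSize + 7) & ~7;  // round up to 8 bytes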
bind(&is_data_object);
// Value is a data object, and it is white. Mark it black. Since we know
// that the object is white, we can make it black by flipping one bit.
- or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ orp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
- and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
+ andp(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
bind(&done);
// Check that there are no elements. Register rcx contains the current JS
// object we've reached through the prototype chain.
Label no_elements;
- cmpq(empty_fixed_array_value,
+ cmpp(empty_fixed_array_value,
FieldOperand(rcx, JSObject::kElementsOffset));
j(equal, &no_elements);
// Second chance, the object may be using the empty slow element dictionary.
LoadRoot(kScratchRegister, Heap::kEmptySlowElementDictionaryRootIndex);
- cmpq(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
+ cmpp(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
j(not_equal, call_runtime);
bind(&no_elements);
movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
- cmpq(rcx, null_value);
+ cmpp(rcx, null_value);
j(not_equal, &next);
}
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
- lea(scratch_reg, Operand(receiver_reg,
+ leap(scratch_reg, Operand(receiver_reg,
JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
Move(kScratchRegister, new_space_start);
- cmpq(scratch_reg, kScratchRegister);
+ cmpp(scratch_reg, kScratchRegister);
j(less, no_memento_found);
- cmpq(scratch_reg, ExternalOperand(new_space_allocation_top));
+ cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
j(greater, no_memento_found);
CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
Heap::kAllocationMementoMapRootIndex);
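+// Hedged sketch of the layout this probes: a memento, when present, is
+// allocated immediately after the array in new space:
+//   receiver_reg -> [ JSArray: kSize ][ AllocationMemento: kSize ] <- scratch
+// scratch_reg points just past the would-be memento; if it falls outside
+// [new_space_start, allocation_top], no freshly allocated memento can follow
+// the array, and the map compare above is what confirms a real memento.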
Register scratch0,
Register scratch1,
Label* found) {
- ASSERT(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
- ASSERT(!scratch1.is(scratch0));
+ DCHECK(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
+ DCHECK(!scratch1.is(scratch0));
Register current = scratch0;
Label loop_again;
bind(&loop_again);
movp(current, FieldOperand(current, HeapObject::kMapOffset));
movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
- and_(scratch1, Immediate(Map::kElementsKindMask));
- shr(scratch1, Immediate(Map::kElementsKindShift));
- cmpq(scratch1, Immediate(DICTIONARY_ELEMENTS));
+ DecodeField<Map::ElementsKindBits>(scratch1);
+ cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
j(equal, found);
movp(current, FieldOperand(current, Map::kPrototypeOffset));
CompareRoot(current, Heap::kNullValueRootIndex);
}
+void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
+ DCHECK(!dividend.is(rax));
+ DCHECK(!dividend.is(rdx));
+ MultiplierAndShift ms(divisor);
+ movl(rax, Immediate(ms.multiplier()));
+ imull(dividend);
+ if (divisor > 0 && ms.multiplier() < 0) addl(rdx, dividend);
+ if (divisor < 0 && ms.multiplier() > 0) subl(rdx, dividend);
+ if (ms.shift() > 0) sarl(rdx, Immediate(ms.shift()));
+ movl(rax, dividend);
+ shrl(rax, Immediate(31));
+ addl(rdx, rax);
+}
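+// Hedged sketch of the multiply-by-magic-number division above (Granlund-
+// Montgomery style); MultiplierAndShift is assumed to choose m and s so
+// that, together with the sign corrections in the addl/subl lines, for every
+// int32 n:
+//   n / d == ((n * (int64)m) >> (32 + s)) + (n < 0 ? 1 : 0)
+// For example, d == 3 gives m == 0x55555556 and s == 0:
+//   n ==  7:  (7 * m) >> 32 ==  2, plus 0 ->  2
+//   n == -7: (-7 * m) >> 32 == -3, plus 1 -> -2 == trunc(-7 / 3)
+// The truncated quotient ends up in rdx.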
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64