__ movq(rbx, r8);
#endif // _WIN64
- // Set up the roots register.
- ExternalReference roots_address = ExternalReference::roots_address();
- __ movq(kRootRegister, roots_address);
-
// Current stack contents:
// [rsp + 2 * kPointerSize ... ]: Internal frame
// [rsp + kPointerSize] : function
&& (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
&& (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
&& (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
- && (allocator()->count(r15) == (frame()->is_used(r15) ? 1 : 0))
&& (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0));
}
#endif
__ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
}
if (is_increment) {
- __ SmiAddConstant(kScratchRegister,
+ __ SmiAddConstant(new_value.reg(),
new_value.reg(),
Smi::FromInt(1),
deferred->entry_label());
} else {
- __ SmiSubConstant(kScratchRegister,
+ __ SmiSubConstant(new_value.reg(),
new_value.reg(),
Smi::FromInt(1),
deferred->entry_label());
}
- __ movq(new_value.reg(), kScratchRegister);
deferred->BindExit();
// Postfix count operations return their input converted to
__ bind(&seq_ascii_string);
// rax: subject string (sequential ascii)
// rcx: RegExp data (FixedArray)
- __ movq(r12, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
+ __ movq(r11, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
__ Set(rdi, 1); // Type is ascii.
__ jmp(&check_code);
__ bind(&seq_two_byte_string);
// rax: subject string (flat two-byte)
// rcx: RegExp data (FixedArray)
- __ movq(r12, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
+ __ movq(r11, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
__ Set(rdi, 0); // Type is two byte.
__ bind(&check_code);
// Check that the irregexp code has been generated for the actual string
// encoding. If it has, the field contains a code object; otherwise it
// contains the hole.
- __ CmpObjectType(r12, CODE_TYPE, kScratchRegister);
+ __ CmpObjectType(r11, CODE_TYPE, kScratchRegister);
__ j(not_equal, &runtime);
// rax: subject string
// rdi: encoding of subject string (1 if ascii, 0 if two_byte);
- // r12: code
+ // r11: code
// Load used arguments before starting to push arguments for the call to the
// native RegExp code, to avoid dealing with a changing stack height.
__ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
// rax: subject string
// rbx: previous index
// rdi: encoding of subject string (1 if ascii 0 if two_byte);
- // r12: code
+ // r11: code
// All checks done. Now push arguments for native regexp code.
__ IncrementCounter(&Counters::regexp_entry_native, 1);
// rax: subject string
// rbx: previous index
// rdi: encoding of subject string (1 if ascii 0 if two_byte);
- // r12: code
+ // r11: code
// Argument 4: End of string data
// Argument 3: Start of string data
__ movq(arg1, rax);
// Locate the code entry and call it.
- __ addq(r12, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ CallCFunction(r12, kRegExpExecuteArguments);
+ __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ CallCFunction(r11, kRegExpExecuteArguments);
// rsi is caller save, as it is used to pass a parameter.
__ pop(rsi);
// rbp: frame pointer (restored after C call).
// rsp: stack pointer (restored after C call).
// r14: number of arguments including receiver (C callee-saved).
- // r15: pointer to the first argument (C callee-saved).
+ // r12: pointer to the first argument (C callee-saved).
// This pointer is reused in LeaveExitFrame(), so it is stored in a
// callee-saved register.
// Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
// Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
__ movq(Operand(rsp, 4 * kPointerSize), r14); // argc.
- __ movq(Operand(rsp, 5 * kPointerSize), r15); // argv.
+ __ movq(Operand(rsp, 5 * kPointerSize), r12); // argv.
if (result_size_ < 2) {
// Pass a pointer to the Arguments object as the first argument.
// Return result in single register (rax).
#else // _WIN64
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
__ movq(rdi, r14); // argc.
- __ movq(rsi, r15); // argv.
+ __ movq(rsi, r12); // argv.
#endif
__ call(rbx);
// Result is in rax - do not destroy this register!
// rbp: frame pointer of exit frame (restored after C call).
// rsp: stack pointer (restored after C call).
// r14: number of arguments including receiver (C callee-saved).
- // r15: argv pointer (C callee-saved).
+ // r12: argv pointer (C callee-saved).
Label throw_normal_exception;
Label throw_termination_exception;
// Push the stack frame type marker twice.
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- __ Push(Smi::FromInt(marker)); // context slot
- __ Push(Smi::FromInt(marker)); // function slot
- // Save callee-saved registers (X64 calling conventions).
+ // Scratch register is neither callee-saved nor an argument register on any
+ // platform. It's free to use at this point.
+ // The smi constant register is not set up yet, so it cannot be used to
+ // load smis here.
+ __ movq(kScratchRegister,
+ reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
+ RelocInfo::NONE);
+ __ push(kScratchRegister); // context slot
+ __ push(kScratchRegister); // function slot
+ // Save callee-saved registers (X64/Win64 calling conventions).
__ push(r12);
__ push(r13);
__ push(r14);
__ push(r15);
- __ push(rdi);
- __ push(rsi);
+#ifdef _WIN64
+ __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+ __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+#endif
__ push(rbx);
- // TODO(X64): Push XMM6-XMM15 (low 64 bits) as well, or make them
- // callee-save in JS code as well.
+ // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low 64 bits are
+ // callee save as well.
// Save copies of the top frame descriptor on the stack.
ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
__ load_rax(c_entry_fp);
__ push(rax);
+ // Set up the roots and smi constant registers.
+ // Needs to be done before any further smi loads.
+ ExternalReference roots_address = ExternalReference::roots_address();
+ __ movq(kRootRegister, roots_address);
+ __ InitializeSmiConstantRegister();
+
#ifdef ENABLE_LOGGING_AND_PROFILING
// If this is the outermost JS call, set js_entry_sp value.
ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
// Restore callee-saved registers (X64 conventions).
__ pop(rbx);
+#ifdef _WIN64
+ // Callee save only in Win64 ABI, arguments/volatile in AMD64 ABI.
__ pop(rsi);
__ pop(rdi);
+#endif
__ pop(r15);
__ pop(r14);
__ pop(r13);
// Check that both strings are non-external ascii strings.
__ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
- &string_add_runtime);
+ &string_add_runtime);
// Get the two characters forming the sub string.
__ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
// just allocate a new one.
Label make_two_character_string, make_flat_ascii_string;
StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, rbx, rcx, r14, r12, rdi, r15, &make_two_character_string);
+ masm, rbx, rcx, r14, r11, rdi, r12, &make_two_character_string);
__ IncrementCounter(&Counters::string_add_native, 1);
__ ret(2 * kPointerSize);
__ bind(&make_flat_ascii_string);
// Both strings are ascii strings. As they are short they are both flat.
- __ AllocateAsciiString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
+ __ AllocateAsciiString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
// rcx: result string
__ movq(rbx, rcx);
// Locate first character of result.
__ j(not_zero, &string_add_runtime);
// Both strings are two byte strings. As they are short they are both
// flat.
- __ AllocateTwoByteString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
+ __ AllocateTwoByteString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
// rcx: result string
__ movq(rbx, rcx);
// Locate first character of result.
class EntryFrameConstants : public AllStatic {
public:
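+ // The entry stub pushes two frame-marker slots, r12-r15, rbx and the
+ // saved c_entry_fp below rbp (eight slots), plus rdi and rsi on Win64
+ // (ten slots); kCallerFPOffset points at that saved frame pointer.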
+#ifdef _WIN64
static const int kCallerFPOffset = -10 * kPointerSize;
+#else
+ static const int kCallerFPOffset = -8 * kPointerSize;
+#endif
static const int kArgvOffset = 6 * kPointerSize;
};
void MacroAssembler::Set(Register dst, int64_t x) {
if (x == 0) {
- xor_(dst, dst);
+ xorl(dst, dst);
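+ // A 32-bit xor zero-extends to the full 64-bit register and avoids the
+ // REX.W prefix of the 64-bit form.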
} else if (is_int32(x)) {
movq(dst, Immediate(static_cast<int32_t>(x)));
} else if (is_uint32(x)) {
}
}
-
void MacroAssembler::Set(const Operand& dst, int64_t x) {
if (is_int32(x)) {
movq(dst, Immediate(static_cast<int32_t>(x)));
static int kSmiShift = kSmiTagSize + kSmiShiftSize;
+Register MacroAssembler::GetSmiConstant(Smi* source) {
+ int value = source->value();
+ if (value == 0) {
+ xorl(kScratchRegister, kScratchRegister);
+ return kScratchRegister;
+ }
+ if (value == 1) {
+ return kSmiConstantRegister;
+ }
+ LoadSmiConstant(kScratchRegister, source);
+ return kScratchRegister;
+}
+
+void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
+ if (FLAG_debug_code) {
+ movq(dst,
+ reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
+ RelocInfo::NONE);
+ cmpq(dst, kSmiConstantRegister);
+ if (allow_stub_calls()) {
+ Assert(equal, "Uninitialized kSmiConstantRegister");
+ } else {
+ Label ok;
+ j(equal, &ok);
+ int3();
+ bind(&ok);
+ }
+ }
+ if (source->value() == 0) {
+ xorl(dst, dst);
+ return;
+ }
+ int value = source->value();
+ bool negative = value < 0;
+ unsigned int uvalue = negative ? -value : value;
+
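+ // The cases below exploit the x64 smi encoding: a smi n is represented as
+ // n << kSmiShift, so its bit pattern is exactly n times that of
+ // Smi::FromInt(1). Since kSmiConstantRegister always holds Smi::FromInt(1),
+ // small constants can be built with a single lea (e.g. base + index*8
+ // yields the tagged 9) instead of a ten-byte movq of a 64-bit immediate.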
+ switch (uvalue) {
+ case 9:
+ lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
+ break;
+ case 8:
+ xorl(dst, dst);
+ lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
+ break;
+ case 4:
+ xorl(dst, dst);
+ lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
+ break;
+ case 5:
+ lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
+ break;
+ case 3:
+ lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
+ break;
+ case 2:
+ lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
+ break;
+ case 1:
+ movq(dst, kSmiConstantRegister);
+ break;
+ case 0:
+ UNREACHABLE();
+ return;
+ default:
+ movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE);
+ return;
+ }
+ if (negative) {
+ neg(dst);
+ }
+}
+
void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
ASSERT_EQ(0, kSmiTag);
if (!dst.is(src)) {
Condition MacroAssembler::CheckPositiveSmi(Register src) {
ASSERT_EQ(0, kSmiTag);
+ // Test the sign bit and the smi tag bit (mask 0x8000000000000001) at once:
+ // rotating left by one moves them into the two lowest bits.
movq(kScratchRegister, src);
rol(kScratchRegister, Immediate(1));
- testl(kScratchRegister, Immediate(0x03));
+ testb(kScratchRegister, Immediate(3));
return zero;
}
}
-
Condition MacroAssembler::CheckEitherSmi(Register first, Register second) {
if (first.is(second)) {
return CheckSmi(first);
Condition MacroAssembler::CheckIsMinSmi(Register src) {
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- movq(kScratchRegister, src);
- rol(kScratchRegister, Immediate(1));
- cmpq(kScratchRegister, Immediate(1));
- return equal;
+ ASSERT(!src.is(kScratchRegister));
+ // If we overflow by subtracting one, it's the minimal smi value.
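+ // (Subtracting the tagged 1 in kSmiConstantRegister overflows only when
+ // src holds the tagged Smi::kMinValue.)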
+ cmpq(src, kSmiConstantRegister);
+ return overflow;
}
Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
// An unsigned 32-bit integer value is valid as long as the high bit
// is not set.
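+ // testl reads only the low 32 bits and sets the sign flag from bit 31;
+ // the "positive" condition is taken when that bit is clear.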
- testq(src, Immediate(0x80000000));
- return zero;
+ testl(src, src);
+ return positive;
}
}
Assert(no_overflow, "Smi subtraction overflow");
} else if (dst.is(src1)) {
- movq(kScratchRegister, src1);
- subq(kScratchRegister, src2);
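+ // cmpq computes the flags of the subtraction without modifying src1, so
+ // src1 still holds its original value if we branch to on_not_smi_result.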
+ movq(kScratchRegister, src2);
+ cmpq(src1, kScratchRegister);
j(overflow, on_not_smi_result);
- movq(src1, kScratchRegister);
+ subq(src1, kScratchRegister);
} else {
movq(dst, src1);
subq(dst, src2);
JumpIfNotSmi(src, on_not_smi_result);
Register tmp = (dst.is(src) ? kScratchRegister : dst);
- Move(tmp, constant);
+ LoadSmiConstant(tmp, constant);
addq(tmp, src);
j(overflow, on_not_smi_result);
if (dst.is(src)) {
if (!dst.is(src)) {
movq(dst, src);
}
+ return;
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
-
- Move(kScratchRegister, constant);
- addq(dst, kScratchRegister);
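+ // kSmiConstantRegister holds the tagged 1, so the tagged 2, 4 and 8 can
+ // be added with a single lea using it as a scaled index.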
+ switch (constant->value()) {
+ case 1:
+ addq(dst, kSmiConstantRegister);
+ return;
+ case 2:
+ lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
+ return;
+ case 4:
+ lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
+ return;
+ case 8:
+ lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
+ return;
+ default:
+ Register constant_reg = GetSmiConstant(constant);
+ addq(dst, constant_reg);
+ return;
+ }
} else {
- Move(dst, constant);
- addq(dst, src);
+ switch (constant->value()) {
+ case 1:
+ lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
+ return;
+ case 2:
+ lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
+ return;
+ case 4:
+ lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
+ return;
+ case 8:
+ lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
+ return;
+ default:
+ LoadSmiConstant(dst, constant);
+ addq(dst, src);
+ return;
+ }
}
}
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
- Move(kScratchRegister, constant);
- addq(kScratchRegister, dst);
+ LoadSmiConstant(kScratchRegister, constant);
+ addq(kScratchRegister, src);
j(overflow, on_not_smi_result);
movq(dst, kScratchRegister);
} else {
- Move(dst, constant);
+ LoadSmiConstant(dst, constant);
addq(dst, src);
j(overflow, on_not_smi_result);
}
}
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
-
- Move(kScratchRegister, constant);
- subq(dst, kScratchRegister);
+ Register constant_reg = GetSmiConstant(constant);
+ subq(dst, constant_reg);
} else {
- // Subtract by adding the negative, to do it in two operations.
if (constant->value() == Smi::kMinValue) {
- Move(dst, constant);
+ LoadSmiConstant(dst, constant);
// Adding and subtracting the min-value gives the same result, it only
// differs on the overflow bit, which we don't check here.
addq(dst, src);
} else {
// Subtract by adding the negation.
- Move(dst, Smi::FromInt(-constant->value()));
+ LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
addq(dst, src);
}
}
// We test that src is non-negative before doing the subtraction.
testq(src, src);
j(not_sign, on_not_smi_result);
- Move(kScratchRegister, constant);
+ LoadSmiConstant(kScratchRegister, constant);
subq(dst, kScratchRegister);
} else {
// Subtract by adding the negation.
- Move(kScratchRegister, Smi::FromInt(-constant->value()));
+ LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
addq(kScratchRegister, dst);
j(overflow, on_not_smi_result);
movq(dst, kScratchRegister);
// We test that src is non-negative before doing the subtraction.
testq(src, src);
j(not_sign, on_not_smi_result);
- Move(dst, constant);
+ LoadSmiConstant(dst, constant);
// Adding and subtracting the min-value gives the same result, it only
// differs on the overflow bit, which we don't check here.
addq(dst, src);
} else {
// Subtract by adding the negation.
- Move(dst, Smi::FromInt(-(constant->value())));
+ LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
addq(dst, src);
j(overflow, on_not_smi_result);
}
xor_(dst, dst);
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
- Move(kScratchRegister, constant);
- and_(dst, kScratchRegister);
+ Register constant_reg = GetSmiConstant(constant);
+ and_(dst, constant_reg);
} else {
- Move(dst, constant);
+ LoadSmiConstant(dst, constant);
and_(dst, src);
}
}
void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
- Move(kScratchRegister, constant);
- or_(dst, kScratchRegister);
+ Register constant_reg = GetSmiConstant(constant);
+ or_(dst, constant_reg);
} else {
- Move(dst, constant);
+ LoadSmiConstant(dst, constant);
or_(dst, src);
}
}
void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
- Move(kScratchRegister, constant);
- xor_(dst, kScratchRegister);
+ Register constant_reg = GetSmiConstant(constant);
+ xor_(dst, constant_reg);
} else {
- Move(dst, constant);
+ LoadSmiConstant(dst, constant);
xor_(dst, src);
}
}
// If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
}
+
SmiIndex MacroAssembler::SmiToIndex(Register dst,
Register src,
int shift) {
if (is_int32(smi)) {
push(Immediate(static_cast<int32_t>(smi)));
} else {
- Set(kScratchRegister, smi);
- push(kScratchRegister);
+ Register constant = GetSmiConstant(source);
+ push(constant);
}
}
movq(rax, rsi);
store_rax(context_address);
- // Setup argv in callee-saved register r15. It is reused in LeaveExitFrame,
+ // Set up argv in callee-saved register r12. It is reused in LeaveExitFrame,
// so it must be retained across the C-call.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
- lea(r15, Operand(rbp, r14, times_pointer_size, offset));
+ lea(r12, Operand(rbp, r14, times_pointer_size, offset));
#ifdef ENABLE_DEBUGGER_SUPPORT
// Save the state of all registers to the stack from the memory
void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode, int result_size) {
// Registers:
- // r15 : argv
+ // r12 : argv
#ifdef ENABLE_DEBUGGER_SUPPORT
// Restore the memory copy of the registers by digging them out from
// the stack. This is needed to allow nested break points.
// Pop everything up to and including the arguments and the receiver
// from the caller stack.
- lea(rsp, Operand(r15, 1 * kPointerSize));
+ lea(rsp, Operand(r12, 1 * kPointerSize));
// Restore current context from top and clear it in debug mode.
ExternalReference context_address(Top::k_context_address);
// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee saved and isn't used by the
// function calling convention.
-static const Register kScratchRegister = { 10 }; // r10.
-static const Register kRootRegister = { 13 }; // r13
+static const Register kScratchRegister = { 10 }; // r10.
+static const Register kSmiConstantRegister = { 15 }; // r15 (callee save).
+static const Register kRootRegister = { 13 }; // r13 (callee save).
+// Value of smi in kSmiConstantRegister.
+static const int kSmiConstantRegisterValue = 1;
// Convenience for platform-independent signatures.
typedef Operand MemOperand;
// ---------------------------------------------------------------------------
// Smi tagging, untagging and operations on tagged smis.
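+ // Initialize kSmiConstantRegister to hold Smi::FromInt(1). A smi is not a
+ // heap pointer, so the 64-bit immediate needs no relocation information.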
+ void InitializeSmiConstantRegister() {
+ movq(kSmiConstantRegister,
+ reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
+ RelocInfo::NONE);
+ }
+
// Conversions between tagged smi values and non-tagged integer values.
// Tag an integer value. The result must be known to be a valid smi value.
// Basic Smi operations.
void Move(Register dst, Smi* source) {
- Set(dst, reinterpret_cast<int64_t>(source));
+ LoadSmiConstant(dst, source);
}
void Move(const Operand& dst, Smi* source) {
- Set(dst, reinterpret_cast<int64_t>(source));
+ Register constant = GetSmiConstant(source);
+ movq(dst, constant);
}
void Push(Smi* smi);
private:
bool generating_stub_;
bool allow_stub_calls_;
+
+ // Returns a register holding the smi value. The register MUST NOT be
+ // modified. It may be the "smi 1 constant" register.
+ Register GetSmiConstant(Smi* value);
+
+ // Moves the smi value to the destination register.
+ void LoadSmiConstant(Register dst, Smi* value);
+
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
bool RegisterAllocator::IsReserved(Register reg) {
return reg.is(rsp) || reg.is(rbp) || reg.is(rsi) ||
- reg.is(kScratchRegister) || reg.is(kRootRegister);
+ reg.is(kScratchRegister) || reg.is(kRootRegister) ||
+ reg.is(kSmiConstantRegister);
}
5, // r8
6, // r9
-1, // r10 Scratch register.
- 9, // r11
- 10, // r12
+ 8, // r11
+ 9, // r12
-1, // r13 Roots array. This is callee saved.
7, // r14
- 8 // r15
+ -1 // r15 Smi constant register.
};
return kNumbers[reg.code()];
}
Register RegisterAllocator::ToRegister(int num) {
ASSERT(num >= 0 && num < kNumRegisters);
const Register kRegisters[] =
- { rax, rbx, rcx, rdx, rdi, r8, r9, r14, r15, r11, r12 };
+ { rax, rbx, rcx, rdx, rdi, r8, r9, r14, r11, r12 };
return kRegisters[num];
}
class RegisterAllocatorConstants : public AllStatic {
public:
- static const int kNumRegisters = 11;
+ static const int kNumRegisters = 10;
static const int kInvalidRegister = -1;
};
using v8::internal::r8;
using v8::internal::r9;
using v8::internal::r11;
-using v8::internal::r12; // Remember: r12..r15 are callee save!
+using v8::internal::r12;
using v8::internal::r13;
using v8::internal::r14;
-using v8::internal::r15;
using v8::internal::times_pointer_size;
using v8::internal::FUNCTION_CAST;
using v8::internal::CodeDesc;
#define __ masm->
+
+static void EntryCode(MacroAssembler* masm) {
+ // Smi constant register is callee save.
+ __ push(v8::internal::kSmiConstantRegister);
+ __ InitializeSmiConstantRegister();
+}
+
+
+static void ExitCode(MacroAssembler* masm) {
+ // Return -1 if kSmiConstantRegister was clobbered during the test.
+ __ Move(rdx, Smi::FromInt(1));
+ __ cmpq(rdx, v8::internal::kSmiConstantRegister);
+ __ movq(rdx, Immediate(-1));
+ __ cmovq(not_equal, rax, rdx);
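+ // cmovq leaves rax (the test result) untouched when kSmiConstantRegister
+ // still holds the tagged 1.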
+ __ pop(v8::internal::kSmiConstantRegister);
+}
+
+
TEST(Smi) {
// Check that C++ Smi operations work as expected.
int64_t test_numbers[] = {
MacroAssembler assembler(buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
masm->set_allow_stub_calls(false);
+ EntryCode(masm);
Label exit;
TestMoveSmi(masm, &exit, 1, Smi::FromInt(0));
__ xor_(rax, rax); // Success.
__ bind(&exit);
+ ExitCode(masm);
__ ret(0);
CodeDesc desc;
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
+ EntryCode(masm);
Label exit;
TestSmiCompare(masm, &exit, 0x10, 0, 0);
__ xor_(rax, rax); // Success.
__ bind(&exit);
+ ExitCode(masm);
__ ret(0);
CodeDesc desc;
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
+ EntryCode(masm);
Label exit;
__ movq(rax, Immediate(1)); // Test number.
__ xor_(rax, rax); // Success.
__ bind(&exit);
+ ExitCode(masm);
__ ret(0);
CodeDesc desc;
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
+ EntryCode(masm);
Label exit;
int64_t twice_max = static_cast<int64_t>(Smi::kMaxValue) * 2;
__ xor_(rax, rax); // Success.
__ bind(&exit);
+ ExitCode(masm);
__ ret(0);
CodeDesc desc;
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
+ EntryCode(masm);
Label exit;
Condition cond;
__ xor_(rax, rax);
__ bind(&exit);
+ ExitCode(masm);
__ ret(0);
CodeDesc desc;
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
+ EntryCode(masm);
Label exit;
TestSmiNeg(masm, &exit, 0x10, 0);
__ xor_(rax, rax); // Success.
__ bind(&exit);
+ ExitCode(masm);
__ ret(0);
CodeDesc desc;
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
+ EntryCode(masm);
Label exit;
// No-overflow tests.
__ xor_(rax, rax); // Success.
__ bind(&exit);
+ ExitCode(masm);
__ ret(0);
CodeDesc desc;
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
+ EntryCode(masm);
Label exit;
SmiSubTest(masm, &exit, 0x10, 1, 2);
__ xor_(rax, rax); // Success.
__ bind(&exit);
+ ExitCode(masm);
__ ret(0);
CodeDesc desc;
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
+ EntryCode(masm);
Label exit;
TestSmiMul(masm, &exit, 0x10, 0, 0);
__ xor_(rax, rax); // Success.
__ bind(&exit);
+ ExitCode(masm);
__ ret(0);
CodeDesc desc;
#endif
bool fraction = !division_by_zero && !overflow && (x % y != 0);
__ Move(r11, Smi::FromInt(x));
- __ Move(r12, Smi::FromInt(y));
+ __ Move(r14, Smi::FromInt(y));
if (!fraction && !overflow && !negative_zero && !division_by_zero) {
// Division succeeds
__ movq(rcx, r11);
- __ movq(r15, Immediate(id));
+ __ movq(r12, Immediate(id));
int result = x / y;
__ Move(r8, Smi::FromInt(result));
- __ SmiDiv(r9, rcx, r12, exit);
- // Might have destroyed rcx and r12.
- __ incq(r15);
+ __ SmiDiv(r9, rcx, r14, exit);
+ // Might have destroyed rcx and r14.
+ __ incq(r12);
__ SmiCompare(r9, r8);
__ j(not_equal, exit);
- __ incq(r15);
+ __ incq(r12);
__ movq(rcx, r11);
- __ Move(r12, Smi::FromInt(y));
+ __ Move(r14, Smi::FromInt(y));
__ SmiCompare(rcx, r11);
__ j(not_equal, exit);
- __ incq(r15);
- __ SmiDiv(rcx, rcx, r12, exit);
+ __ incq(r12);
+ __ SmiDiv(rcx, rcx, r14, exit);
- __ incq(r15);
+ __ incq(r12);
__ SmiCompare(rcx, r8);
__ j(not_equal, exit);
} else {
// Division fails.
- __ movq(r15, Immediate(id + 8));
+ __ movq(r12, Immediate(id + 8));
Label fail_ok, fail_ok2;
__ movq(rcx, r11);
- __ SmiDiv(r9, rcx, r12, &fail_ok);
+ __ SmiDiv(r9, rcx, r14, &fail_ok);
__ jmp(exit);
__ bind(&fail_ok);
- __ incq(r15);
+ __ incq(r12);
__ SmiCompare(rcx, r11);
__ j(not_equal, exit);
- __ incq(r15);
- __ SmiDiv(rcx, rcx, r12, &fail_ok2);
+ __ incq(r12);
+ __ SmiDiv(rcx, rcx, r14, &fail_ok2);
__ jmp(exit);
__ bind(&fail_ok2);
- __ incq(r15);
+ __ incq(r12);
__ SmiCompare(rcx, r11);
__ j(not_equal, exit);
}
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
+ EntryCode(masm);
Label exit;
+ __ push(r14);
__ push(r12);
- __ push(r15);
TestSmiDiv(masm, &exit, 0x10, 1, 1);
TestSmiDiv(masm, &exit, 0x20, 1, 0);
TestSmiDiv(masm, &exit, 0x30, -1, 0);
TestSmiDiv(masm, &exit, 0x130, Smi::kMinValue, Smi::kMinValue);
TestSmiDiv(masm, &exit, 0x140, Smi::kMinValue, -1);
- __ xor_(r15, r15); // Success.
+ __ xor_(r12, r12); // Success.
__ bind(&exit);
- __ movq(rax, r15);
- __ pop(r15);
+ __ movq(rax, r12);
__ pop(r12);
+ __ pop(r14);
+ ExitCode(masm);
__ ret(0);
CodeDesc desc;
bool negative_zero = (!fraction && x < 0);
__ Move(rcx, Smi::FromInt(x));
__ movq(r11, rcx);
- __ Move(r12, Smi::FromInt(y));
+ __ Move(r14, Smi::FromInt(y));
if (!division_overflow && !negative_zero && !division_by_zero) {
// Modulo succeeds
- __ movq(r15, Immediate(id));
+ __ movq(r12, Immediate(id));
int result = x % y;
__ Move(r8, Smi::FromInt(result));
- __ SmiMod(r9, rcx, r12, exit);
+ __ SmiMod(r9, rcx, r14, exit);
- __ incq(r15);
+ __ incq(r12);
__ SmiCompare(r9, r8);
__ j(not_equal, exit);
- __ incq(r15);
+ __ incq(r12);
__ SmiCompare(rcx, r11);
__ j(not_equal, exit);
- __ incq(r15);
- __ SmiMod(rcx, rcx, r12, exit);
+ __ incq(r12);
+ __ SmiMod(rcx, rcx, r14, exit);
- __ incq(r15);
+ __ incq(r12);
__ SmiCompare(rcx, r8);
__ j(not_equal, exit);
} else {
// Modulo fails.
- __ movq(r15, Immediate(id + 8));
+ __ movq(r12, Immediate(id + 8));
Label fail_ok, fail_ok2;
- __ SmiMod(r9, rcx, r12, &fail_ok);
+ __ SmiMod(r9, rcx, r14, &fail_ok);
__ jmp(exit);
__ bind(&fail_ok);
- __ incq(r15);
+ __ incq(r12);
__ SmiCompare(rcx, r11);
__ j(not_equal, exit);
- __ incq(r15);
- __ SmiMod(rcx, rcx, r12, &fail_ok2);
+ __ incq(r12);
+ __ SmiMod(rcx, rcx, r14, &fail_ok2);
__ jmp(exit);
__ bind(&fail_ok2);
- __ incq(r15);
+ __ incq(r12);
__ SmiCompare(rcx, r11);
__ j(not_equal, exit);
}
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
+ EntryCode(masm);
Label exit;
+ __ push(r14);
__ push(r12);
- __ push(r15);
TestSmiMod(masm, &exit, 0x10, 1, 1);
TestSmiMod(masm, &exit, 0x20, 1, 0);
TestSmiMod(masm, &exit, 0x30, -1, 0);
TestSmiMod(masm, &exit, 0x130, Smi::kMinValue, Smi::kMinValue);
TestSmiMod(masm, &exit, 0x140, Smi::kMinValue, -1);
- __ xor_(r15, r15); // Success.
+ __ xor_(r12, r12); // Success.
__ bind(&exit);
- __ movq(rax, r15);
- __ pop(r15);
+ __ movq(rax, r12);
__ pop(r12);
+ __ pop(r14);
+ ExitCode(masm);
__ ret(0);
CodeDesc desc;
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 3,
&actual_size,
true));
CHECK(buffer);
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
+ EntryCode(masm);
Label exit;
TestSmiIndex(masm, &exit, 0x10, 0);
__ xor_(rax, rax); // Success.
__ bind(&exit);
+ ExitCode(masm);
__ ret(0);
CodeDesc desc;
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false); // Avoid inline checks.
+ EntryCode(masm);
Label exit;
TestSelectNonSmi(masm, &exit, 0x10, 0, 0);
__ xor_(rax, rax); // Success.
__ bind(&exit);
+ ExitCode(masm);
__ ret(0);
CodeDesc desc;
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
+ EntryCode(masm);
Label exit;
TestSmiAnd(masm, &exit, 0x10, 0, 0);
__ xor_(rax, rax); // Success.
__ bind(&exit);
+ ExitCode(masm);
__ ret(0);
CodeDesc desc;
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
+ EntryCode(masm);
Label exit;
TestSmiOr(masm, &exit, 0x10, 0, 0);
__ xor_(rax, rax); // Success.
__ bind(&exit);
+ ExitCode(masm);
__ ret(0);
CodeDesc desc;
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
+ EntryCode(masm);
Label exit;
TestSmiXor(masm, &exit, 0x10, 0, 0);
__ xor_(rax, rax); // Success.
__ bind(&exit);
+ ExitCode(masm);
__ ret(0);
CodeDesc desc;
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
+ EntryCode(masm);
Label exit;
TestSmiNot(masm, &exit, 0x10, 0);
__ xor_(rax, rax); // Success.
__ bind(&exit);
+ ExitCode(masm);
__ ret(0);
CodeDesc desc;
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 3,
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 4,
&actual_size,
true));
CHECK(buffer);
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
+ EntryCode(masm);
Label exit;
TestSmiShiftLeft(masm, &exit, 0x10, 0);
__ xor_(rax, rax); // Success.
__ bind(&exit);
+ ExitCode(masm);
__ ret(0);
CodeDesc desc;
// Allocate an executable page of memory.
size_t actual_size;
byte* buffer =
- static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 3,
&actual_size,
true));
CHECK(buffer);
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
+ EntryCode(masm);
Label exit;
TestSmiShiftLogicalRight(masm, &exit, 0x10, 0);
__ xor_(rax, rax); // Success.
__ bind(&exit);
+ ExitCode(masm);
__ ret(0);
CodeDesc desc;
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
+ EntryCode(masm);
Label exit;
TestSmiShiftArithmeticRight(masm, &exit, 0x10, 0);
__ xor_(rax, rax); // Success.
__ bind(&exit);
+ ExitCode(masm);
__ ret(0);
CodeDesc desc;
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
+ EntryCode(masm);
Label exit;
TestPositiveSmiPowerUp(masm, &exit, 0x20, 0);
__ xor_(rax, rax); // Success.
__ bind(&exit);
+ ExitCode(masm);
__ ret(0);
CodeDesc desc;
masm->set_allow_stub_calls(false);
Label exit;
- __ push(r12);
+ EntryCode(masm);
__ push(r13);
+ __ push(r14);
__ push(rbx);
__ push(rbp);
__ push(Immediate(0x100)); // <-- rbp
- // r12 = rsp[3]
+ // r14 = rsp[3]
// rbx = rsp[5]
// r13 = rsp[7]
- __ lea(r12, Operand(rsp, 3 * kPointerSize));
+ __ lea(r14, Operand(rsp, 3 * kPointerSize));
__ lea(r13, Operand(rbp, -3 * kPointerSize));
__ lea(rbx, Operand(rbp, -5 * kPointerSize));
__ movl(rcx, Immediate(2));
__ lea(rsp, Operand(rbp, kPointerSize));
__ pop(rbp);
__ pop(rbx);
+ __ pop(r14);
__ pop(r13);
- __ pop(r12);
+ ExitCode(masm);
__ ret(0);