'LINKFLAGS': ['-m32']
},
'arch:x64': {
- 'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
+ 'CPPDEFINES': ['V8_TARGET_ARCH_X64', 'V8_LONG_SMI'],
'CCFLAGS': ['-m64'],
'LINKFLAGS': ['-m64'],
},
'ARFLAGS': ['/MACHINE:X86']
},
'arch:x64': {
- 'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
+ 'CPPDEFINES': ['V8_TARGET_ARCH_X64', 'V8_LONG_SMI'],
'LINKFLAGS': ['/MACHINE:X64'],
'ARFLAGS': ['/MACHINE:X64']
},
const int kHeapObjectTagSize = 2;
const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1;
-
+#ifdef V8_LONG_SMI
+#ifndef V8_TARGET_ARCH_X64
+#error "Large smis on non-64-bit platform."
+#endif
// Tag information for Smi.
const int kSmiTag = 0;
const int kSmiTagSize = 1;
const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
-
+const int kSmiShiftSize = 31;
+const int kSmiValueSize = 32;
+#else
+// Tag information for Smi.
+const int kSmiTag = 0;
+const int kSmiTagSize = 1;
+const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
+const int kSmiShiftSize = 0;
+const int kSmiValueSize = 31;
+#endif
/**
* This class exports constants and functionality from within v8 that
}
static inline int SmiValue(internal::Object* value) {
- return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> kSmiTagSize;
+#ifdef V8_LONG_SMI
+ int shift_bits = kSmiTagSize + kSmiShiftSize;
+ // Shift down and throw away top 32 bits.
+ return static_cast<int>(reinterpret_cast<intptr_t>(value) >> shift_bits);
+#else
+ int shift_bits = kSmiTagSize + kSmiShiftSize;
+ // Throw away top 32 bits and shift down (requires >> to be sign extending).
+ return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> shift_bits;
+#endif
}
static inline bool IsExternalTwoByteString(int instance_type) {
ON_BAILOUT("v8::SetElementsToPixelData()", return);
ENTER_V8;
HandleScope scope;
- if (!ApiCheck(i::Smi::IsValid(length),
+ if (!ApiCheck(length <= i::PixelArray::kMaxLength,
"v8::Object::SetIndexedPropertiesToPixelData()",
"length exceeds max acceptable value")) {
return;
intptr_t data_value =
static_cast<intptr_t>(data_ptr >> i::Internals::kAlignedPointerShift);
STATIC_ASSERT(sizeof(data_ptr) == sizeof(data_value));
- if (i::Smi::IsIntptrValid(data_value)) {
+ if (i::Smi::IsValid(data_value)) {
i::Handle<i::Object> obj(i::Smi::FromIntptr(data_value));
return Utils::ToLocal(obj);
}
JSEntryFunction entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
// Call the function through the right JS entry stub.
- value = CALL_GENERATED_CODE(entry, func->code()->entry(), *func,
- *receiver, argc, args);
+ byte* entry_address= func->code()->entry();
+ JSFunction* function = *func;
+ Object* receiver_pointer = *receiver;
+ value = CALL_GENERATED_CODE(entry, entry_address, function,
+ receiver_pointer, argc, args);
}
#ifdef DEBUG
&& second->IsAsciiRepresentation();
// Make sure that an out of memory exception is thrown if the length
- // of the new cons string is too large to fit in a Smi.
- if (length > Smi::kMaxValue || length < -0) {
+ // of the new cons string is too large.
+ if (length > String::kMaxLength || length < 0) {
Top::context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
# a type error is thrown.
macro DATE_VALUE(arg) = (%_ClassOf(arg) === 'Date' ? %_ValueOf(arg) : ThrowDateTypeError());
-# Last input and last subject are after the captures so we can omit them on
-# results returned from global searches. Beware - these evaluate their
-# arguments twice.
+# Last input and last subject of regexp matches.
macro LAST_SUBJECT(array) = ((array)[1]);
macro LAST_INPUT(array) = ((array)[2]);
Smi* Smi::FromInt(int value) {
ASSERT(Smi::IsValid(value));
+ int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
intptr_t tagged_value =
- (static_cast<intptr_t>(value) << kSmiTagSize) | kSmiTag;
+ (static_cast<intptr_t>(value) << smi_shift_bits) | kSmiTag;
return reinterpret_cast<Smi*>(tagged_value);
}
Smi* Smi::FromIntptr(intptr_t value) {
ASSERT(Smi::IsValid(value));
- return reinterpret_cast<Smi*>((value << kSmiTagSize) | kSmiTag);
+ int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
+ return reinterpret_cast<Smi*>((value << smi_shift_bits) | kSmiTag);
}
#ifdef DEBUG
bool in_range = (value >= kMinValue) && (value <= kMaxValue);
#endif
+
+#ifdef V8_LONG_SMI
+ // To be representable as a long smi, the value must be a 32-bit integer.
+ bool result = (value == static_cast<int32_t>(value));
+#else
// To be representable as an tagged small integer, the two
// most-significant bits of 'value' must be either 00 or 11 due to
// sign-extension. To check this we add 01 to the two
// in fact doesn't work correctly with gcc4.1.1 in some cases: The
// compiler may produce undefined results in case of signed integer
// overflow. The computation must be done w/ unsigned ints.
- bool result =
- ((static_cast<unsigned int>(value) + 0x40000000U) & 0x80000000U) == 0;
- ASSERT(result == in_range);
- return result;
-}
-
-
-bool Smi::IsIntptrValid(intptr_t value) {
-#ifdef DEBUG
- bool in_range = (value >= kMinValue) && (value <= kMaxValue);
+ bool result = (static_cast<uintptr_t>(value + 0x40000000U) < 0x80000000U);
#endif
- // See Smi::IsValid(int) for description.
- bool result =
- ((static_cast<uintptr_t>(value) + 0x40000000U) < 0x80000000U);
ASSERT(result == in_range);
return result;
}
// Smi represents integer Numbers that can be stored in 31 bits.
// Smis are immediate which means they are NOT allocated in the heap.
-// Smi stands for small integer.
// The this pointer has the following format: [31 bit signed int] 0
-// On 64-bit, the top 32 bits of the pointer is allowed to have any
-// value.
+// For long smis it has the following format:
+// [32 bit signed int] [31 bits zero padding] 0
+// Smi stands for small integer.
class Smi: public Object {
public:
// Returns the integer value.
// Returns whether value can be represented in a Smi.
static inline bool IsValid(intptr_t value);
- static inline bool IsIntptrValid(intptr_t);
-
// Casting.
static inline Smi* cast(Object* object);
void SmiVerify();
#endif
- static const int kSmiNumBits = 31;
- // Min and max limits for Smi values.
- static const int kMinValue = -(1 << (kSmiNumBits - 1));
- static const int kMaxValue = (1 << (kSmiNumBits - 1)) - 1;
+ static const int kMinValue = (-1 << (kSmiValueSize - 1));
+ static const int kMaxValue = -(kMinValue + 1);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Smi);
void PixelArrayVerify();
#endif // DEBUG
+ // Maximal acceptable length for a pixel array.
+ static const int kMaxLength = 0x3fffffff;
+
// PixelArray headers are not quadword aligned.
static const int kExternalPointerOffset = Array::kAlignedSize;
static const int kHeaderSize = kExternalPointerOffset + kPointerSize;
static const int kShortLengthShift = kHashShift + kShortStringTag;
static const int kMediumLengthShift = kHashShift + kMediumStringTag;
static const int kLongLengthShift = kHashShift + kLongStringTag;
+ // Maximal string length that can be stored in the hash/length field.
+ static const int kMaxLength = (1 << (32 - kLongLengthShift)) - 1;
// Limit for truncation in short printing.
static const int kMaxShortPrintLength = 1024;
RUNTIME_ASSERT(type == FUNCTION_TEMPLATE_INFO_TYPE ||
type == OBJECT_TEMPLATE_INFO_TYPE);
RUNTIME_ASSERT(offset > 0);
- if (type == FUNCTION_TEMPLATE_INFO_TYPE) {
+ if (type == FUNCTION_TEMPLATE_INFO_TYPE) {
RUNTIME_ASSERT(offset < FunctionTemplateInfo::kSize);
} else {
RUNTIME_ASSERT(offset < ObjectTemplateInfo::kSize);
} else {
escaped_length += 3;
}
- // We don't allow strings that are longer than Smi range.
- if (!Smi::IsValid(escaped_length)) {
+ // We don't allow strings that are longer than a maximal length.
+ if (escaped_length > String::kMaxLength) {
Top::context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
} else if (elt->IsString()) {
String* element = String::cast(elt);
int element_length = element->length();
- if (!Smi::IsValid(element_length + position)) {
- Top::context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
- }
position += element_length;
if (ascii && !element->IsAsciiRepresentation()) {
ascii = false;
} else {
return Top::Throw(Heap::illegal_argument_symbol());
}
+ if (position > String::kMaxLength) {
+ Top::context()->mark_out_of_memory();
+ return Failure::OutOfMemoryException();
+ }
}
int length = position;
#include "serialize.h"
#include "stub-cache.h"
#include "v8threads.h"
+#include "top.h"
namespace v8 {
namespace internal {
}
// Top addresses
- const char* top_address_format = "Top::get_address_from_id(%i)";
- size_t top_format_length = strlen(top_address_format);
+ const char* top_address_format = "Top::%s";
+
+ const char* AddressNames[] = {
+#define C(name) #name,
+ TOP_ADDRESS_LIST(C)
+ TOP_ADDRESS_LIST_PROF(C)
+ NULL
+#undef C
+ };
+
+ size_t top_format_length = strlen(top_address_format) - 2;
for (uint16_t i = 0; i < Top::k_top_address_count; ++i) {
- Vector<char> name = Vector<char>::New(top_format_length + 1);
+ const char* address_name = AddressNames[i];
+ Vector<char> name =
+ Vector<char>::New(top_format_length + strlen(address_name) + 1);
const char* chars = name.start();
- OS::SNPrintF(name, top_address_format, i);
+ OS::SNPrintF(name, top_address_format, address_name);
Add(Top::get_address_from_id((Top::AddressId)i), TOP_ADDRESS, i, chars);
}
return top_addresses[id];
}
+
char* Top::Iterate(ObjectVisitor* v, char* thread_storage) {
ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(thread_storage);
Iterate(v, thread);
// ----------------------------------------------------------------------------
// General helper functions
-// Returns true iff x is a power of 2. Does not work for zero.
+// Returns true iff x is a power of 2 (or zero). Cannot be used with the
+// maximally negative value of the type T (the -1 overflows).
template <typename T>
static inline bool IsPowerOf2(T x) {
return (x & (x - 1)) == 0;
return Heap::IdleNotification();
}
+static const uint32_t kRandomPositiveSmiMax = 0x3fffffff;
Smi* V8::RandomPositiveSmi() {
uint32_t random = Random();
- ASSERT(IsPowerOf2(Smi::kMaxValue + 1));
- return Smi::FromInt(random & Smi::kMaxValue);
+ ASSERT(static_cast<uint32_t>(Smi::kMaxValue) >= kRandomPositiveSmiMax);
+ // kRandomPositiveSmiMax must match the value being divided
+ // by in math.js.
+ return Smi::FromInt(random & kRandomPositiveSmiMax);
}
} } // namespace v8::internal
return static_cast<Condition>(cc ^ 1);
}
-// -----------------------------------------------------------------------------
-
-Immediate::Immediate(Smi* value) {
- value_ = static_cast<int32_t>(reinterpret_cast<intptr_t>(value));
-}
// -----------------------------------------------------------------------------
// Implementation of Assembler
void Assembler::shift_32(Register dst, Immediate shift_amount, int subcode) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- ASSERT(is_uint6(shift_amount.value_)); // illegal shift count
+ ASSERT(is_uint5(shift_amount.value_)); // illegal shift count
if (shift_amount.value_ == 1) {
emit_optional_rex_32(dst);
emit(0xD1);
}
+void Assembler::clc() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF8);
+}
+
void Assembler::cdq() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
void Assembler::cmovq(Condition cc, Register dst, Register src) {
+ if (cc == always) {
+ movq(dst, src);
+ } else if (cc == never) {
+ return;
+ }
// No need to check CpuInfo for CMOV support, it's a required part of the
// 64-bit architecture.
ASSERT(cc >= 0); // Use mov for unconditional moves.
void Assembler::cmovq(Condition cc, Register dst, const Operand& src) {
+ if (cc == always) {
+ movq(dst, src);
+ } else if (cc == never) {
+ return;
+ }
ASSERT(cc >= 0);
EnsureSpace ensure_space(this);
last_pc_ = pc_;
void Assembler::cmovl(Condition cc, Register dst, Register src) {
+ if (cc == always) {
+ movl(dst, src);
+ } else if (cc == never) {
+ return;
+ }
ASSERT(cc >= 0);
EnsureSpace ensure_space(this);
last_pc_ = pc_;
void Assembler::cmovl(Condition cc, Register dst, const Operand& src) {
+ if (cc == always) {
+ movl(dst, src);
+ } else if (cc == never) {
+ return;
+ }
ASSERT(cc >= 0);
EnsureSpace ensure_space(this);
last_pc_ = pc_;
void Assembler::j(Condition cc, Label* L) {
+ if (cc == always) {
+ jmp(L);
+ return;
+ } else if (cc == never) {
+ return;
+ }
EnsureSpace ensure_space(this);
last_pc_ = pc_;
ASSERT(is_uint4(cc));
// There is no possible reason to store a heap pointer without relocation
// info, so it must be a smi.
ASSERT(value->IsSmi());
- // Smis never have more than 32 significant bits, but they might
- // have garbage in the high bits.
- movq(dst,
- Immediate(static_cast<int32_t>(reinterpret_cast<intptr_t>(*value))));
+ movq(dst, reinterpret_cast<int64_t>(*value), RelocInfo::NONE);
} else {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
}
-void Assembler::rcl(Register dst, uint8_t imm8) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- ASSERT(is_uint6(imm8)); // illegal shift count
- if (imm8 == 1) {
- emit_rex_64(dst);
- emit(0xD1);
- emit_modrm(0x2, dst);
- } else {
- emit_rex_64(dst);
- emit(0xC1);
- emit_modrm(0x2, dst);
- emit(imm8);
- }
-}
-
void Assembler::rdtsc() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
void Assembler::setcc(Condition cc, Register reg) {
+ if (cc > last_condition) {
+ movb(reg, Immediate(cc == always ? 1 : 0));
+ return;
+ }
EnsureSpace ensure_space(this);
last_pc_ = pc_;
ASSERT(is_uint4(cc));
}
+void Assembler::testb(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (dst.code() > 3 || src.code() > 3) {
+ // Register is not one of al, bl, cl, dl. Its encoding needs REX.
+ emit_rex_32(dst, src);
+ }
+ emit(0x84);
+ emit_modrm(dst, src);
+}
+
+
void Assembler::testb(Register reg, Immediate mask) {
ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
EnsureSpace ensure_space(this);
less_equal = 14,
greater = 15,
+ // Fake conditions that are handled by the opcode implementations
+ // that accept them (e.g. cmov, setcc, j); they are never encoded.
+ always = 16,
+ never = 17,
// aliases
carry = below,
not_carry = above_equal,
zero = equal,
not_zero = not_equal,
sign = negative,
- not_sign = positive
+ not_sign = positive,
+ last_condition = greater
};
class Immediate BASE_EMBEDDED {
public:
explicit Immediate(int32_t value) : value_(value) {}
- inline explicit Immediate(Smi* value);
private:
int32_t value_;
immediate_arithmetic_op_32(0x4, dst, src);
}
+ void andl(Register dst, Register src) {
+ arithmetic_op_32(0x23, dst, src);
+ }
+
+
void decq(Register dst);
void decq(const Operand& dst);
void decl(Register dst);
immediate_arithmetic_op(0x1, dst, src);
}
+ void orl(Register dst, Immediate src) {
+ immediate_arithmetic_op_32(0x1, dst, src);
+ }
+
void or_(const Operand& dst, Immediate src) {
immediate_arithmetic_op(0x1, dst, src);
}
+ void orl(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op_32(0x1, dst, src);
+ }
+
- void rcl(Register dst, uint8_t imm8);
+ void rcl(Register dst, Immediate imm8) {
+ shift(dst, imm8, 0x2);
+ }
+
+ void rol(Register dst, Immediate imm8) {
+ shift(dst, imm8, 0x0);
+ }
+
+ void rcr(Register dst, Immediate imm8) {
+ shift(dst, imm8, 0x3);
+ }
+
+ void ror(Register dst, Immediate imm8) {
+ shift(dst, imm8, 0x1);
+ }
// Shifts dst:src left by cl bits, affecting only dst.
void shld(Register dst, Register src);
immediate_arithmetic_op_8(0x5, dst, src);
}
+ void testb(Register dst, Register src);
void testb(Register reg, Immediate mask);
void testb(const Operand& op, Immediate mask);
void testl(Register dst, Register src);
void bts(const Operand& dst, Register src);
// Miscellaneous
+ void clc();
void cpuid();
void hlt();
void int3();
__ movq(rbp, rsp);
// Store the arguments adaptor context sentinel.
- __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ Push(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
// Push the function on the stack.
__ push(rdi);
__ pop(rbp);
// Remove caller arguments from the stack.
- // rbx holds a Smi, so we convery to dword offset by multiplying by 4.
- // TODO(smi): Find a way to abstract indexing by a smi.
- ASSERT_EQ(kSmiTagSize, 1 && kSmiTag == 0);
- ASSERT_EQ(kPointerSize, (1 << kSmiTagSize) * 4);
- // TODO(smi): Find way to abstract indexing by a smi.
__ pop(rcx);
- // 1 * kPointerSize is offset of receiver.
- __ lea(rsp, Operand(rsp, rbx, times_half_pointer_size, 1 * kPointerSize));
+ SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
+ __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
__ push(rcx);
}
// Because runtime functions always remove the receiver from the stack, we
// have to fake one to avoid underflowing the stack.
__ push(rax);
- __ push(Immediate(Smi::FromInt(0)));
+ __ Push(Smi::FromInt(0));
// Do call to runtime routine.
__ CallRuntime(Runtime::kStackGuard, 1);
// Update the index on the stack and in register rax.
__ movq(rax, Operand(rbp, kIndexOffset));
- __ addq(rax, Immediate(Smi::FromInt(1)));
+ __ SmiAddConstant(rax, rax, Smi::FromInt(1));
__ movq(Operand(rbp, kIndexOffset), rax);
__ bind(&entry);
__ Move(FieldOperand(result, JSArray::kPropertiesOffset),
Factory::empty_fixed_array());
// Field JSArray::kElementsOffset is initialized later.
- __ movq(FieldOperand(result, JSArray::kLengthOffset), Immediate(0));
+ __ Move(FieldOperand(result, JSArray::kLengthOffset), Smi::FromInt(0));
// If no storage is requested for the elements array just set the empty
// fixed array.
__ cmpq(rax, Immediate(1));
__ j(not_equal, &argc_two_or_more);
__ movq(rdx, Operand(rsp, kPointerSize)); // Get the argument from the stack.
- Condition not_positive_smi = __ CheckNotPositiveSmi(rdx);
- __ j(not_positive_smi, call_generic_code);
+ __ JumpIfNotPositiveSmi(rdx, call_generic_code);
// Handle construction of an empty array of a certain size. Bail out if size
// is to large to actually allocate an elements array.
- __ JumpIfSmiGreaterEqualsConstant(rdx,
- JSObject::kInitialMaxFastElementArray,
- call_generic_code);
+ __ SmiCompare(rdx, Smi::FromInt(JSObject::kInitialMaxFastElementArray));
+ __ j(greater_equal, call_generic_code);
// rax: argc
// rdx: array_size (smi)
__ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
ASSERT(kSmiTag == 0);
- Condition not_smi = __ CheckNotSmi(rbx);
- __ Assert(not_smi, "Unexpected initial map for Array function");
+ Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
+ __ Check(not_smi, "Unexpected initial map for Array function");
__ CmpObjectType(rbx, MAP_TYPE, rcx);
- __ Assert(equal, "Unexpected initial map for Array function");
+ __ Check(equal, "Unexpected initial map for Array function");
}
// Run the native code for the Array function called as a normal function.
// does always have a map.
GenerateLoadArrayFunction(masm, rbx);
__ cmpq(rdi, rbx);
- __ Assert(equal, "Unexpected Array function");
+ __ Check(equal, "Unexpected Array function");
// Initial map for the builtin Array function should be a map.
__ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
ASSERT(kSmiTag == 0);
- Condition not_smi = __ CheckNotSmi(rbx);
- __ Assert(not_smi, "Unexpected initial map for Array function");
+ Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
+ __ Check(not_smi, "Unexpected initial map for Array function");
__ CmpObjectType(rbx, MAP_TYPE, rcx);
- __ Assert(equal, "Unexpected initial map for Array function");
+ __ Check(equal, "Unexpected initial map for Array function");
}
// Run the native code for the Array function called as constructor.
// edi: called object
// eax: number of arguments
__ bind(&non_function_call);
-
// Set expected number of arguments to zero (not changing eax).
__ movq(rbx, Immediate(0));
__ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ LeaveConstructFrame();
// Remove caller arguments from the stack and return.
- ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- // TODO(smi): Find a way to abstract indexing by a smi.
__ pop(rcx);
- // 1 * kPointerSize is offset of receiver.
- __ lea(rsp, Operand(rsp, rbx, times_half_pointer_size, 1 * kPointerSize));
+ SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
+ __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
__ push(rcx);
__ IncrementCounter(&Counters::constructed_objects, 1);
__ ret(0);
}
}
+
void DeferredCode::RestoreRegisters() {
// Restore registers in reverse order due to the stack.
for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
// Test if operands are smi or number objects (fp). Requirements:
// operand_1 in rax, operand_2 in rdx; falls through on float or smi
// operands, jumps to the non_float label otherwise.
- static void CheckFloatOperands(MacroAssembler* masm,
- Label* non_float);
+ static void CheckNumberOperands(MacroAssembler* masm,
+ Label* non_float);
// Allocate a heap number in new space with undefined value.
// Returns tagged pointer in result, or jumps to need_gc if new space is full.
__ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
frame_->EmitPush(kScratchRegister);
frame_->EmitPush(rsi); // The context is the second argument.
- frame_->EmitPush(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
+ frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0));
Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
// Return value is ignored.
}
// adaptor frame below it.
Label invoke, adapted;
__ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
- __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adapted);
// No arguments adaptor frame. Copy fixed number of arguments.
// have to worry about getting rid of the elements from the virtual
// frame.
Label loop;
- __ bind(&loop);
__ testl(rcx, rcx);
__ j(zero, &invoke);
+ __ bind(&loop);
__ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
__ decl(rcx);
- __ jmp(&loop);
+ __ j(not_zero, &loop);
// Invoke the function. The virtual frame knows about the receiver
// so make sure to forget that explicitly.
// Declaration nodes are always introduced in one of two modes.
ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
- frame_->EmitPush(Immediate(Smi::FromInt(attr)));
+ frame_->EmitPush(Smi::FromInt(attr));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
} else if (node->fun() != NULL) {
Load(node->fun());
} else {
- frame_->EmitPush(Immediate(Smi::FromInt(0))); // no initial value!
+ frame_->EmitPush(Smi::FromInt(0)); // no initial value!
}
Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
// Ignore the return value (declarations are statements).
__ movl(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
__ Integer32ToSmi(rax, rax);
frame_->EmitPush(rax); // <- slot 1
- frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
+ frame_->EmitPush(Smi::FromInt(0)); // <- slot 0
entry.Jump();
fixed_array.Bind();
// rax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
- frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 3
+ frame_->EmitPush(Smi::FromInt(0)); // <- slot 3
frame_->EmitPush(rax); // <- slot 2
// Push the length of the array and the initial index onto the stack.
__ movl(rax, FieldOperand(rax, FixedArray::kLengthOffset));
__ Integer32ToSmi(rax, rax);
frame_->EmitPush(rax); // <- slot 1
- frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
+ frame_->EmitPush(Smi::FromInt(0)); // <- slot 0
// Condition.
entry.Bind();
node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
__ movq(rax, frame_->ElementAt(0)); // load the current count
- __ cmpl(rax, frame_->ElementAt(1)); // compare to the array length
- node->break_target()->Branch(above_equal);
+ __ SmiCompare(frame_->ElementAt(1), rax); // compare to the array length
+ node->break_target()->Branch(below_equal);
// Get the i'th entry of the array.
__ movq(rdx, frame_->ElementAt(2));
node->continue_target()->Bind();
frame_->SpillAll();
frame_->EmitPop(rax);
- __ addq(rax, Immediate(Smi::FromInt(1)));
+ __ SmiAddConstant(rax, rax, Smi::FromInt(1));
frame_->EmitPush(rax);
entry.Jump();
frame_->EmitPush(rax);
// In case of thrown exceptions, this is where we continue.
- __ movq(rcx, Immediate(Smi::FromInt(THROWING)));
+ __ Move(rcx, Smi::FromInt(THROWING));
finally_block.Jump();
// --- Try block ---
// Fake a top of stack value (unneeded when FALLING) and set the
// state in ecx, then jump around the unlink blocks if any.
frame_->EmitPush(Heap::kUndefinedValueRootIndex);
- __ movq(rcx, Immediate(Smi::FromInt(FALLING)));
+ __ Move(rcx, Smi::FromInt(FALLING));
if (nof_unlinks > 0) {
finally_block.Jump();
}
// Fake TOS for targets that shadowed breaks and continues.
frame_->EmitPush(Heap::kUndefinedValueRootIndex);
}
- __ movq(rcx, Immediate(Smi::FromInt(JUMPING + i)));
+ __ Move(rcx, Smi::FromInt(JUMPING + i));
if (--nof_unlinks > 0) {
// If this is not the last unlink block, jump around the next.
finally_block.Jump();
for (int i = 0; i < shadows.length(); i++) {
if (has_valid_frame() && shadows[i]->is_bound()) {
BreakTarget* original = shadows[i]->other_target();
- __ cmpq(rcx, Immediate(Smi::FromInt(JUMPING + i)));
+ __ SmiCompare(rcx, Smi::FromInt(JUMPING + i));
if (i == kReturnShadowIndex) {
// The return value is (already) in rax.
Result return_value = allocator_->Allocate(rax);
if (has_valid_frame()) {
// Check if we need to rethrow the exception.
JumpTarget exit;
- __ cmpq(rcx, Immediate(Smi::FromInt(THROWING)));
+ __ SmiCompare(rcx, Smi::FromInt(THROWING));
exit.Branch(not_equal);
// Rethrow exception.
// Literal array (0).
__ push(literals_);
// Literal index (1).
- __ push(Immediate(Smi::FromInt(node_->literal_index())));
+ __ Push(Smi::FromInt(node_->literal_index()));
// RegExp pattern (2).
__ Push(node_->pattern());
// RegExp flags (3).
// Literal array (0).
__ push(literals_);
// Literal index (1).
- __ push(Immediate(Smi::FromInt(node_->literal_index())));
+ __ Push(Smi::FromInt(node_->literal_index()));
// Constant properties (2).
__ Push(node_->constant_properties());
__ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
// Literal array (0).
__ push(literals_);
// Literal index (1).
- __ push(Immediate(Smi::FromInt(node_->literal_index())));
+ __ Push(Smi::FromInt(node_->literal_index()));
// Constant properties (2).
__ Push(node_->literals());
__ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
__ push(dst_);
__ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
__ push(rax);
- __ push(Immediate(Smi::FromInt(1)));
+ __ Push(Smi::FromInt(1));
if (is_increment_) {
__ CallRuntime(Runtime::kNumberAdd, 2);
} else {
// Call the runtime for the addition or subtraction.
__ push(rax);
- __ push(Immediate(Smi::FromInt(1)));
+ __ Push(Smi::FromInt(1));
if (is_increment_) {
__ CallRuntime(Runtime::kNumberAdd, 2);
} else {
is_increment);
}
- __ movq(kScratchRegister, new_value.reg());
+ __ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
if (is_increment) {
- __ addl(kScratchRegister, Immediate(Smi::FromInt(1)));
+ __ SmiAddConstant(kScratchRegister,
+ new_value.reg(),
+ Smi::FromInt(1),
+ deferred->entry_label());
} else {
- __ subl(kScratchRegister, Immediate(Smi::FromInt(1)));
+ __ SmiSubConstant(kScratchRegister,
+ new_value.reg(),
+ Smi::FromInt(1),
+ deferred->entry_label());
}
- // Smi test.
- deferred->Branch(overflow);
- __ JumpIfNotSmi(kScratchRegister, deferred->entry_label());
__ movq(new_value.reg(), kScratchRegister);
deferred->BindExit();
// Skip the arguments adaptor frame if it exists.
Label check_frame_marker;
- __ cmpq(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(not_equal, &check_frame_marker);
__ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
// Check the marker in the calling frame.
__ bind(&check_frame_marker);
- __ cmpq(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
- Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
+ __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
+ Smi::FromInt(StackFrame::CONSTRUCT));
fp.Unuse();
destination()->Split(equal);
}
void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
// RBP value is aligned, so it should be tagged as a smi (without necesarily
- // being padded as a smi).
+ // being padded as a smi), so it should not be treated as a smi.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
Result rbp_as_smi = allocator_->Allocate();
ASSERT(rbp_as_smi.is_valid());
dest->false_target()->Branch(equal);
// Smi => false iff zero.
- Condition equals = masm_->CheckSmiEqualsConstant(value.reg(), 0);
- dest->false_target()->Branch(equals);
+ __ SmiCompare(value.reg(), Smi::FromInt(0));
+ dest->false_target()->Branch(equal);
Condition is_smi = masm_->CheckSmi(value.reg());
dest->true_target()->Branch(is_smi);
right_side = Result(right_val);
// Test smi equality and comparison by signed int comparison.
// Both sides are smis, so we can use an Immediate.
- __ cmpl(left_side.reg(), Immediate(Smi::cast(*right_side.handle())));
+ __ SmiCompare(left_side.reg(), Smi::cast(*right_side.handle()));
left_side.Unuse();
right_side.Unuse();
dest->Split(cc);
Result temp = allocator()->Allocate();
ASSERT(temp.is_valid());
__ movq(temp.reg(),
- FieldOperand(operand.reg(), HeapObject::kMapOffset));
+ FieldOperand(operand.reg(), HeapObject::kMapOffset));
__ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
temp.Unuse();
CompareStub stub(cc, strict);
Result answer = frame_->CallStub(&stub, &left_side, &right_side);
// The result is a Smi, which is negative, zero, or positive.
- __ testl(answer.reg(), answer.reg()); // Both zero and sign flag right.
+ __ SmiTest(answer.reg()); // Sets both zero and sign flag.
answer.Unuse();
dest->Split(cc);
} else {
// When non-smi, call out to the compare stub.
CompareStub stub(cc, strict);
Result answer = frame_->CallStub(&stub, &left_side, &right_side);
- __ testl(answer.reg(), answer.reg()); // Sets both zero and sign flags.
+ __ SmiTest(answer.reg()); // Sets both zero and sign flags.
answer.Unuse();
dest->true_target()->Branch(cc);
dest->false_target()->Jump();
is_smi.Bind();
left_side = Result(left_reg);
right_side = Result(right_reg);
- __ cmpl(left_side.reg(), right_side.reg());
+ __ SmiCompare(left_side.reg(), right_side.reg());
right_side.Unuse();
left_side.Unuse();
dest->Split(cc);
void DeferredInlineSmiAdd::Generate() {
__ push(dst_);
- __ push(Immediate(value_));
+ __ Push(value_);
GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
__ CallStub(&igostub);
if (!dst_.is(rax)) __ movq(dst_, rax);
void DeferredInlineSmiAddReversed::Generate() {
- __ push(Immediate(value_)); // Note: sign extended.
+ __ Push(value_);
__ push(dst_);
GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
__ CallStub(&igostub);
void DeferredInlineSmiSub::Generate() {
__ push(dst_);
- __ push(Immediate(value_)); // Note: sign extended.
+ __ Push(value_);
GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
__ CallStub(&igostub);
if (!dst_.is(rax)) __ movq(dst_, rax);
void DeferredInlineSmiOperation::Generate() {
__ push(src_);
- __ push(Immediate(value_)); // Note: sign extended.
+ __ Push(value_);
// For mod we don't generate all the Smi code inline.
GenericBinaryOpStub stub(
op_,
__ JumpIfNotSmi(operand->reg(), deferred->entry_label());
__ SmiAddConstant(operand->reg(),
operand->reg(),
- int_value,
+ smi_value,
deferred->entry_label());
deferred->BindExit();
frame_->Push(operand);
// A smi currently fits in a 32-bit Immediate.
__ SmiSubConstant(operand->reg(),
operand->reg(),
- int_value,
+ smi_value,
deferred->entry_label());
deferred->BindExit();
frame_->Push(operand);
overwrite_mode);
__ JumpIfNotSmi(operand->reg(), deferred->entry_label());
__ SmiShiftLogicalRightConstant(answer.reg(),
- operand->reg(),
- shift_value,
- deferred->entry_label());
+ operand->reg(),
+ shift_value,
+ deferred->entry_label());
deferred->BindExit();
operand->Unuse();
frame_->Push(&answer);
overwrite_mode);
__ JumpIfNotSmi(operand->reg(), deferred->entry_label());
if (op == Token::BIT_AND) {
- __ SmiAndConstant(operand->reg(), operand->reg(), int_value);
+ __ SmiAndConstant(operand->reg(), operand->reg(), smi_value);
} else if (op == Token::BIT_XOR) {
if (int_value != 0) {
- __ SmiXorConstant(operand->reg(), operand->reg(), int_value);
+ __ SmiXorConstant(operand->reg(), operand->reg(), smi_value);
}
} else {
ASSERT(op == Token::BIT_OR);
if (int_value != 0) {
- __ SmiOrConstant(operand->reg(), operand->reg(), int_value);
+ __ SmiOrConstant(operand->reg(), operand->reg(), smi_value);
}
}
deferred->BindExit();
(IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
operand->ToRegister();
frame_->Spill(operand->reg());
- DeferredCode* deferred = new DeferredInlineSmiOperation(op,
- operand->reg(),
- operand->reg(),
- smi_value,
- overwrite_mode);
+ DeferredCode* deferred =
+ new DeferredInlineSmiOperation(op,
+ operand->reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
// Check for negative or non-Smi left hand side.
__ JumpIfNotPositiveSmi(operand->reg(), deferred->entry_label());
if (int_value < 0) int_value = -int_value;
if (int_value == 1) {
- __ movl(operand->reg(), Immediate(Smi::FromInt(0)));
+ __ Move(operand->reg(), Smi::FromInt(0));
} else {
- __ SmiAndConstant(operand->reg(), operand->reg(), int_value - 1);
+ __ SmiAndConstant(operand->reg(),
+ operand->reg(),
+ Smi::FromInt(int_value - 1));
}
deferred->BindExit();
frame_->Push(operand);
// Check that the key is a non-negative smi.
__ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
- // Ensure that the smi is zero-extended. This is not guaranteed.
- __ movl(key.reg(), key.reg());
// Check that the receiver is not a smi.
__ JumpIfSmi(receiver.reg(), deferred->entry_label());
deferred->Branch(not_equal);
// Check that the key is within bounds. Both the key and the
- // length of the JSArray are smis, so compare only low 32 bits.
- __ cmpl(key.reg(),
- FieldOperand(receiver.reg(), JSArray::kLengthOffset));
- deferred->Branch(greater_equal);
+ // length of the JSArray are smis.
+ __ SmiCompare(FieldOperand(receiver.reg(), JSArray::kLengthOffset),
+ key.reg());
+ deferred->Branch(less_equal);
// Get the elements array from the receiver and check that it
// is a flat array (not a dictionary).
Label slow;
Label done;
Label try_float;
- Label special;
// Check whether the value is a smi.
__ JumpIfNotSmi(rax, &try_float);
// Enter runtime system if the value of the smi is zero
// to make sure that we switch between 0 and -0.
- // Also enter it if the value of the smi is Smi::kMinValue
- __ testl(rax, Immediate(0x7FFFFFFE));
- __ j(zero, &special);
- __ negl(rax);
- __ jmp(&done);
+ // Also enter it if the value of the smi is Smi::kMinValue.
+ __ SmiNeg(rax, rax, &done);
- __ bind(&special);
- // Either zero or -0x4000000, neither of which become a smi when negated.
- __ testl(rax, rax);
- __ j(not_zero, &slow);
+ // Either zero or Smi::kMinValue, neither of which becomes a smi when negated.
+ __ SmiCompare(rax, Smi::FromInt(0));
+ __ j(not_equal, &slow);
__ Move(rax, Factory::minus_zero_value());
__ jmp(&done);
// Call builtin if operands are not floating point or smi.
Label check_for_symbols;
// Push arguments on stack, for helper functions.
- FloatingPointHelper::CheckFloatOperands(masm, &check_for_symbols);
+ FloatingPointHelper::CheckNumberOperands(masm, &check_for_symbols);
FloatingPointHelper::LoadFloatOperands(masm, rax, rdx);
__ FCmp();
ASSERT(cc_ == greater || cc_ == greater_equal); // remaining cases
ncr = LESS;
}
- __ push(Immediate(Smi::FromInt(ncr)));
+ __ Push(Smi::FromInt(ncr));
}
// Restore return address on the stack.
__ ret(2 * kPointerSize);
__ bind(&is_not_instance);
- __ movq(rax, Immediate(Smi::FromInt(1)));
+ __ Move(rax, Smi::FromInt(1));
__ ret(2 * kPointerSize);
// Slow-case: Go through the JavaScript implementation.
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
__ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
- __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(not_equal, &runtime);
// Value in rcx is Smi encoded.
// Check if the calling frame is an arguments adaptor frame.
Label adaptor;
__ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movq(rcx, Operand(rbx, StandardFrameConstants::kContextOffset));
- __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adaptor);
// Check index against formal parameters count limit passed in
// Check if the calling frame is an arguments adaptor frame.
Label adaptor;
__ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
- __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adaptor);
// Nothing to do: The formal number of parameters has already been
// Push the stack frame type marker twice.
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- __ push(Immediate(Smi::FromInt(marker))); // context slot
- __ push(Immediate(Smi::FromInt(marker))); // function slot
+ __ Push(Smi::FromInt(marker)); // context slot
+ __ Push(Smi::FromInt(marker)); // function slot
// Save callee-saved registers (X64 calling conventions).
__ push(r12);
__ push(r13);
// must be inserted below the return address on the stack so we
// temporarily store that in a register.
__ pop(rax);
- __ push(Immediate(Smi::FromInt(0)));
+ __ Push(Smi::FromInt(0));
__ push(rax);
// Do tail-call to runtime routine.
}
-void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
- Label* non_float) {
+void FloatingPointHelper::CheckNumberOperands(MacroAssembler* masm,
+ Label* non_float) {
Label test_other, done;
// Test if both operands are numbers (heap_numbers or smis).
// If not, jump to label non_float.
case Token::SHR:
case Token::SAR:
// Move the second operand into register ecx.
- __ movl(rcx, rbx);
+ __ movq(rcx, rbx);
// Perform the operation.
switch (op_) {
case Token::SAR:
- __ SmiShiftArithmeticRight(rax, rax, rbx);
+ __ SmiShiftArithmeticRight(rax, rax, rcx);
break;
case Token::SHR:
- __ SmiShiftLogicalRight(rax, rax, rbx, slow);
+ __ SmiShiftLogicalRight(rax, rax, rcx, slow);
break;
case Token::SHL:
- __ SmiShiftLeft(rax, rax, rbx, slow);
+ __ SmiShiftLeft(rax, rax, rcx, slow);
break;
default:
UNREACHABLE();
case Token::DIV: {
// rax: y
// rdx: x
- FloatingPointHelper::CheckFloatOperands(masm, &call_runtime);
+ FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
// Fast-case: Both operands are numbers.
// Allocate a heap number, if needed.
Label skip_allocation;
case Token::SAR:
case Token::SHL:
case Token::SHR: {
- FloatingPointHelper::CheckFloatOperands(masm, &call_runtime);
+ FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
// TODO(X64): Don't convert a Smi to float and then back to int32
// afterwards.
FloatingPointHelper::LoadFloatOperands(masm);
__ pop(rcx);
__ pop(rax);
switch (op_) {
- case Token::BIT_OR: __ or_(rax, rcx); break;
- case Token::BIT_AND: __ and_(rax, rcx); break;
- case Token::BIT_XOR: __ xor_(rax, rcx); break;
+ case Token::BIT_OR: __ orl(rax, rcx); break;
+ case Token::BIT_AND: __ andl(rax, rcx); break;
+ case Token::BIT_XOR: __ xorl(rax, rcx); break;
case Token::SAR: __ sarl(rax); break;
case Token::SHL: __ shll(rax); break;
case Token::SHR: __ shrl(rax); break;
default: UNREACHABLE();
}
if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ testl(rax, Immediate(0xc0000000));
- __ j(not_zero, &non_smi_result);
- } else {
- // Check if result fits in a smi.
- __ cmpl(rax, Immediate(0xc0000000));
+ // Check if result is negative. A negative result can only happen for a
+ // shift by zero, which also doesn't update the sign flag.
+ __ testl(rax, rax);
__ j(negative, &non_smi_result);
}
- // Tag smi result and return.
+ __ JumpIfNotValidSmiValue(rax, &non_smi_result);
+ // Tag smi result, if possible, and return.
__ Integer32ToSmi(rax, rax);
__ ret(2 * kPointerSize);
// All ops except SHR return a signed int32 that we load in a HeapNumber.
- if (op_ != Token::SHR) {
+ if (op_ != Token::SHR && non_smi_result.is_linked()) {
__ bind(&non_smi_result);
// Allocate a heap number if needed.
__ movsxlq(rbx, rax); // rbx: sign extended 32-bit result
// Check that the value is a normal property.
__ bind(&done);
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ testl(Operand(r0, r1, times_pointer_size, kDetailsOffset - kHeapObjectTag),
- Immediate(Smi::FromInt(PropertyDetails::TypeField::mask())));
+ __ Test(Operand(r0, r1, times_pointer_size, kDetailsOffset - kHeapObjectTag),
+ Smi::FromInt(PropertyDetails::TypeField::mask()));
__ j(not_zero, miss_label);
// Get the value at the masked, scaled index.
__ testb(FieldOperand(rdx, Map::kInstanceTypeOffset),
Immediate(kIsSymbolMask));
__ j(zero, &slow);
- // Probe the dictionary leaving result in ecx.
+ // Probe the dictionary leaving result in rcx.
GenerateDictionaryLoad(masm, &slow, rbx, rcx, rdx, rax);
GenerateCheckNonObjectOrLoaded(masm, &slow, rcx);
__ movq(rax, rcx);
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
__ ret(0);
- // Array index string: If short enough use cache in length/hash field (ebx).
+ // Array index string: If short enough use cache in length/hash field (rbx).
// We assert that there are enough bits in an int32_t after the hash shift
// bits have been subtracted to allow space for the length and the cached
// array index.
__ movq(rbx, Operand(rsp, 1 * kPointerSize)); // 1 ~ return address
// Check that the key is a smi.
__ JumpIfNotSmi(rbx, &slow);
- // If it is a smi, make sure it is zero-extended, so it can be
- // used as an index in a memory operand.
- __ movl(rbx, rbx); // Clear the high bits of rbx.
__ CmpInstanceType(rcx, JS_ARRAY_TYPE);
__ j(equal, &array);
// Object case: Check key against length in the elements array.
// rax: value
// rdx: JSObject
- // rbx: index (as a smi), zero-extended.
+ // rbx: index (as a smi)
__ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
// rdx: JSArray
// rcx: FixedArray
// rbx: index (as a smi)
- // flags: compare (rbx, rdx.length())
+ // flags: smicompare (rdx.length(), rbx)
__ j(not_equal, &slow); // do not leave holes in the array
__ SmiToInteger64(rbx, rbx);
__ cmpl(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ j(above_equal, &slow);
// Increment and restore smi-tag.
- __ Integer64AddToSmi(rbx, rbx, 1);
+ __ Integer64PlusConstantToSmi(rbx, rbx, 1);
__ movq(FieldOperand(rdx, JSArray::kLengthOffset), rbx);
- __ SmiSubConstant(rbx, rbx, 1, NULL);
+ __ SmiSubConstant(rbx, rbx, Smi::FromInt(1));
__ jmp(&fast);
// Array case: Get the length and the elements array from the JS
// rdx: JSArray
// rbx: index (as a smi)
__ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::fixed_array_map());
+ __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+ Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &slow);
// Check the key against the length in the array, compute the
// address to store into and fall through to fast case.
- __ cmpl(rbx, FieldOperand(rdx, JSArray::kLengthOffset));
- __ j(above_equal, &extra);
+ __ SmiCompare(FieldOperand(rdx, JSArray::kLengthOffset), rbx);
+ __ j(below_equal, &extra);
// Fast case: Do the store.
__ bind(&fast);
// rax: value
// rcx: FixedArray
// rbx: index (as a smi)
- __ movq(Operand(rcx, rbx, times_half_pointer_size,
+ Label non_smi_value;
+ __ JumpIfNotSmi(rax, &non_smi_value);
+ SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
+ __ movq(Operand(rcx, index.reg, index.scale,
FixedArray::kHeaderSize - kHeapObjectTag),
- rax);
+ rax);
+ __ ret(0);
+ __ bind(&non_smi_value);
+ // Slow case that needs to retain rbx for use by RecordWrite.
// Update write barrier for the elements array address.
+ SmiIndex index2 = masm->SmiToIndex(kScratchRegister, rbx, kPointerSizeLog2);
+ __ movq(Operand(rcx, index2.reg, index2.scale,
+ FixedArray::kHeaderSize - kHeapObjectTag),
+ rax);
__ movq(rdx, rax);
- __ RecordWrite(rcx, 0, rdx, rbx);
+ __ RecordWriteNonSmi(rcx, 0, rdx, rbx);
__ ret(0);
}
namespace internal {
MacroAssembler::MacroAssembler(void* buffer, int size)
- : Assembler(buffer, size),
- unresolved_(0),
- generating_stub_(false),
- allow_stub_calls_(true),
- code_object_(Heap::undefined_value()) {
+ : Assembler(buffer, size),
+ unresolved_(0),
+ generating_stub_(false),
+ allow_stub_calls_(true),
+ code_object_(Heap::undefined_value()) {
}
-void MacroAssembler::LoadRoot(Register destination,
- Heap::RootListIndex index) {
+void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
movq(destination, Operand(r13, index << kPointerSizeLog2));
}
}
-void MacroAssembler::CompareRoot(Register with,
- Heap::RootListIndex index) {
+void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
cmpq(with, Operand(r13, index << kPointerSizeLog2));
}
-void MacroAssembler::CompareRoot(Operand with,
- Heap::RootListIndex index) {
+void MacroAssembler::CompareRoot(Operand with, Heap::RootListIndex index) {
LoadRoot(kScratchRegister, index);
cmpq(with, kScratchRegister);
}
// Minor key encoding in 12 bits of three registers (object, address and
// scratch) OOOOAAAASSSS.
- class ScratchBits: public BitField<uint32_t, 0, 4> {};
- class AddressBits: public BitField<uint32_t, 4, 4> {};
- class ObjectBits: public BitField<uint32_t, 8, 4> {};
+ class ScratchBits : public BitField<uint32_t, 0, 4> {};
+ class AddressBits : public BitField<uint32_t, 4, 4> {};
+ class ObjectBits : public BitField<uint32_t, 8, 4> {};
Major MajorKey() { return RecordWrite; }
// Set the remembered set bit for [object+offset].
// object is the object being stored into, value is the object being stored.
-// If offset is zero, then the scratch register contains the array index into
-// the elements array represented as a Smi.
+// If offset is zero, then the smi_index register contains the array index into
+// the elements array represented as a smi. Otherwise it can be used as a
+// scratch register.
// All registers are clobbered by the operation.
void MacroAssembler::RecordWrite(Register object,
int offset,
Register value,
- Register scratch) {
+ Register smi_index) {
// First, check if a remembered set write is even needed. The tests below
// catch stores of Smis and stores into young gen (which does not have space
// for the remembered set bits).
Label done;
+ JumpIfSmi(value, &done);
+ RecordWriteNonSmi(object, offset, value, smi_index);
+ bind(&done);
+}
+
+
+void MacroAssembler::RecordWriteNonSmi(Register object,
+ int offset,
+ Register scratch,
+ Register smi_index) {
+ Label done;
// Test that the object address is not in the new space. We cannot
// set remembered set bits in the new space.
- movq(value, object);
+ movq(scratch, object);
ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
- and_(value, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
+ and_(scratch, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
movq(kScratchRegister, ExternalReference::new_space_start());
- cmpq(value, kScratchRegister);
+ cmpq(scratch, kScratchRegister);
j(equal, &done);
if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) {
// Compute the bit offset in the remembered set, leave it in 'value'.
- lea(value, Operand(object, offset));
+ lea(scratch, Operand(object, offset));
ASSERT(is_int32(Page::kPageAlignmentMask));
- and_(value, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask)));
- shr(value, Immediate(kObjectAlignmentBits));
+ and_(scratch, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask)));
+ shr(scratch, Immediate(kObjectAlignmentBits));
// Compute the page address from the heap object pointer, leave it in
// 'object' (immediate value is sign extended).
// to limit code size. We should probably evaluate this decision by
// measuring the performance of an equivalent implementation using
// "simpler" instructions
- bts(Operand(object, Page::kRSetOffset), value);
+ bts(Operand(object, Page::kRSetOffset), scratch);
} else {
- Register dst = scratch;
+ Register dst = smi_index;
if (offset != 0) {
lea(dst, Operand(object, offset));
} else {
// array access: calculate the destination address in the same manner as
- // KeyedStoreIC::GenerateGeneric. Multiply a smi by 4 to get an offset
- // into an array of pointers.
- lea(dst, Operand(object, dst, times_half_pointer_size,
+ // KeyedStoreIC::GenerateGeneric.
+ SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
+ lea(dst, Operand(object,
+ index.reg,
+ index.scale,
FixedArray::kHeaderSize - kHeapObjectTag));
}
// If we are already generating a shared stub, not inlining the
// record write code isn't going to save us any memory.
if (generating_stub()) {
- RecordWriteHelper(this, object, dst, value);
+ RecordWriteHelper(this, object, dst, scratch);
} else {
- RecordWriteStub stub(object, dst, value);
+ RecordWriteStub stub(object, dst, scratch);
CallStub(&stub);
}
}
addq(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
}
-
Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
bool* resolved) {
// Move the builtin function into the temporary function slot by
JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
movq(rdi, FieldOperand(rdx, builtins_offset));
-
return Builtins::GetCode(id, resolved);
}
-
-void MacroAssembler::Set(Register dst, int64_t x) {
- if (x == 0) {
- xor_(dst, dst);
- } else if (is_int32(x)) {
- movq(dst, Immediate(x));
- } else if (is_uint32(x)) {
- movl(dst, Immediate(x));
+
+void MacroAssembler::Set(Register dst, int64_t x) {
+ if (x == 0) {
+ xor_(dst, dst);
+ } else if (is_int32(x)) {
+ movq(dst, Immediate(x));
+ } else if (is_uint32(x)) {
+ movl(dst, Immediate(x));
+ } else {
+ movq(dst, x, RelocInfo::NONE);
+ }
+}
+
+
+void MacroAssembler::Set(const Operand& dst, int64_t x) {
+ if (x == 0) {
+ xor_(kScratchRegister, kScratchRegister);
+ movq(dst, kScratchRegister);
+ } else if (is_int32(x)) {
+ movq(dst, Immediate(x));
+ } else if (is_uint32(x)) {
+ movl(dst, Immediate(x));
+ } else {
+ movq(kScratchRegister, x, RelocInfo::NONE);
+ movq(dst, kScratchRegister);
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Smi tagging, untagging and tag detection.
+
+#ifdef V8_LONG_SMI
+
+static int kSmiShift = kSmiTagSize + kSmiShiftSize;
+
+void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
+ ASSERT_EQ(0, kSmiTag);
+ if (!dst.is(src)) {
+ movl(dst, src);
+ }
+ shl(dst, Immediate(kSmiShift));
+}
+
+
+void MacroAssembler::Integer32ToSmi(Register dst,
+ Register src,
+ Label* on_overflow) {
+ ASSERT_EQ(0, kSmiTag);
+ // 32-bit integer always fits in a long smi.
+ if (!dst.is(src)) {
+ movl(dst, src);
+ }
+ shl(dst, Immediate(kSmiShift));
+}
+
+
+void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
+ Register src,
+ int constant) {
+ if (dst.is(src)) {
+ addq(dst, Immediate(constant));
+ } else {
+ lea(dst, Operand(src, constant));
+ }
+ shl(dst, Immediate(kSmiShift));
+}
+
+
+void MacroAssembler::SmiToInteger32(Register dst, Register src) {
+ ASSERT_EQ(0, kSmiTag);
+ if (!dst.is(src)) {
+ movq(dst, src);
+ }
+ shr(dst, Immediate(kSmiShift));
+}
+
+
+void MacroAssembler::SmiToInteger64(Register dst, Register src) {
+ ASSERT_EQ(0, kSmiTag);
+ if (!dst.is(src)) {
+ movq(dst, src);
+ }
+ sar(dst, Immediate(kSmiShift));
+}
+
+
+void MacroAssembler::SmiTest(Register src) {
+ testq(src, src);
+}
+
+
+void MacroAssembler::SmiCompare(Register dst, Register src) {
+ cmpq(dst, src);
+}
+
+
+void MacroAssembler::SmiCompare(Register dst, Smi* src) {
+ ASSERT(!dst.is(kScratchRegister));
+ if (src->value() == 0) {
+ testq(dst, dst);
+ } else {
+ Move(kScratchRegister, src);
+ cmpq(dst, kScratchRegister);
+ }
+}
+
+
+void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
+ cmpq(dst, src);
+}
+
+
+void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
+ if (src->value() == 0) {
+ // Zero is the only tagged long smi with a 32-bit representation.
+ cmpq(dst, Immediate(0));
+ } else {
+ Move(kScratchRegister, src);
+ cmpq(dst, kScratchRegister);
+ }
+}
+
+
+void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
+ Register src,
+ int power) {
+ ASSERT(power >= 0);
+ ASSERT(power < 64);
+ if (power == 0) {
+ SmiToInteger64(dst, src);
+ return;
+ }
+ if (!dst.is(src)) {
+ movq(dst, src);
+ }
+ if (power < kSmiShift) {
+ sar(dst, Immediate(kSmiShift - power));
+ } else if (power > kSmiShift) {
+ shl(dst, Immediate(power - kSmiShift));
+ }
+}
+
+
+Condition MacroAssembler::CheckSmi(Register src) {
+ ASSERT_EQ(0, kSmiTag);
+ testb(src, Immediate(kSmiTagMask));
+ return zero;
+}
+
+
+Condition MacroAssembler::CheckPositiveSmi(Register src) {
+ ASSERT_EQ(0, kSmiTag);
+ movq(kScratchRegister, src);
+ rol(kScratchRegister, Immediate(1));
+ testl(kScratchRegister, Immediate(0x03));
+ return zero;
+}
+
+
+Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
+ if (first.is(second)) {
+ return CheckSmi(first);
+ }
+ movl(kScratchRegister, first);
+ orl(kScratchRegister, second);
+ testb(kScratchRegister, Immediate(kSmiTagMask));
+ return zero;
+}
+
+
+Condition MacroAssembler::CheckIsMinSmi(Register src) {
+ ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ movq(kScratchRegister, src);
+ rol(kScratchRegister, Immediate(1));
+ cmpq(kScratchRegister, Immediate(1));
+ return equal;
+}
+
+
+Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
+ // A 32-bit integer value can always be converted to a smi.
+ return always;
+}
+
+
+void MacroAssembler::SmiNeg(Register dst, Register src, Label* on_smi_result) {
+ if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+ movq(kScratchRegister, src);
+ neg(dst); // Low 32 bits are retained as zero by negation.
+ // Test if result is zero or Smi::kMinValue.
+ cmpq(dst, kScratchRegister);
+ j(not_equal, on_smi_result);
+ movq(src, kScratchRegister);
+ } else {
+ movq(dst, src);
+ neg(dst);
+ cmpq(dst, src);
+ // If the result is zero or Smi::kMinValue, negation failed to create a smi.
+ j(not_equal, on_smi_result);
+ }
+}
+
+
+void MacroAssembler::SmiAdd(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result) {
+ ASSERT(!dst.is(src2));
+ if (dst.is(src1)) {
+ addq(dst, src2);
+ Label smi_result;
+ j(no_overflow, &smi_result);
+ // Restore src1.
+ subq(src1, src2);
+ jmp(on_not_smi_result);
+ bind(&smi_result);
+ } else {
+ movq(dst, src1);
+ addq(dst, src2);
+ j(overflow, on_not_smi_result);
+ }
+}
+
+
+void MacroAssembler::SmiSub(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result) {
+ ASSERT(!dst.is(src2));
+ if (dst.is(src1)) {
+ subq(dst, src2);
+ Label smi_result;
+ j(no_overflow, &smi_result);
+ // Restore src1.
+ addq(src1, src2);
+ jmp(on_not_smi_result);
+ bind(&smi_result);
+ } else {
+ movq(dst, src1);
+ subq(dst, src2);
+ j(overflow, on_not_smi_result);
+ }
+}
+
+
+void MacroAssembler::SmiMul(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result) {
+ ASSERT(!dst.is(src2));
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+
+ if (dst.is(src1)) {
+ Label failure, zero_correct_result;
+ movq(kScratchRegister, src1); // Create backup for later testing.
+ SmiToInteger64(dst, src1);
+ imul(dst, src2);
+ j(overflow, &failure);
+
+ // Check for negative zero result. If product is zero, and one
+ // argument is negative, go to slow case.
+ Label correct_result;
+ testq(dst, dst);
+ j(not_zero, &correct_result);
+
+ movq(dst, kScratchRegister);
+ xor_(dst, src2);
+ j(positive, &zero_correct_result); // Result was positive zero.
+
+ bind(&failure); // Reused failure exit, restores src1.
+ movq(src1, kScratchRegister);
+ jmp(on_not_smi_result);
+
+ bind(&zero_correct_result);
+ xor_(dst, dst);
+
+ bind(&correct_result);
+ } else {
+ SmiToInteger64(dst, src1);
+ imul(dst, src2);
+ j(overflow, on_not_smi_result);
+ // Check for negative zero result. If product is zero, and one
+ // argument is negative, go to slow case.
+ Label correct_result;
+ testq(dst, dst);
+ j(not_zero, &correct_result);
+ // One of src1 and src2 is zero, so check whether the other is
+ // negative.
+ movq(kScratchRegister, src1);
+ xor_(kScratchRegister, src2);
+ j(negative, on_not_smi_result);
+ bind(&correct_result);
+ }
+}
+
+
+void MacroAssembler::SmiTryAddConstant(Register dst,
+ Register src,
+ Smi* constant,
+ Label* on_not_smi_result) {
+ // Does not assume that src is a smi.
+ ASSERT_EQ(1, kSmiTagMask);
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src.is(kScratchRegister));
+
+ JumpIfNotSmi(src, on_not_smi_result);
+ Register tmp = (dst.is(src) ? kScratchRegister : dst);
+ Move(tmp, constant);
+ addq(tmp, src);
+ j(overflow, on_not_smi_result);
+ if (dst.is(src)) {
+ movq(dst, tmp);
+ }
+}
+
+
+void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
+ if (constant->value() == 0) {
+ if (!dst.is(src)) {
+ movq(dst, src);
+ }
+ } else if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+
+ Move(kScratchRegister, constant);
+ addq(dst, kScratchRegister);
+ } else {
+ Move(dst, constant);
+ addq(dst, src);
+ }
+}
+
+
+void MacroAssembler::SmiAddConstant(Register dst,
+ Register src,
+ Smi* constant,
+ Label* on_not_smi_result) {
+ if (constant->value() == 0) {
+ if (!dst.is(src)) {
+ movq(dst, src);
+ }
+ } else if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+
+ Move(kScratchRegister, constant);
+ addq(dst, kScratchRegister);
+ Label result_ok;
+ j(no_overflow, &result_ok);
+ subq(dst, kScratchRegister);
+ jmp(on_not_smi_result);
+ bind(&result_ok);
+ } else {
+ Move(dst, constant);
+ addq(dst, src);
+ j(overflow, on_not_smi_result);
+ }
+}
+
+
+void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
+ if (constant->value() == 0) {
+ if (!dst.is(src)) {
+ movq(dst, src);
+ }
+ } else if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+
+ Move(kScratchRegister, constant);
+ subq(dst, kScratchRegister);
+ } else {
+ // Subtract by adding the negative, to do it in two operations.
+ if (constant->value() == Smi::kMinValue) {
+ Move(kScratchRegister, constant);
+ movq(dst, src);
+ subq(dst, kScratchRegister);
+ } else {
+ Move(dst, Smi::FromInt(-constant->value()));
+ addq(dst, src);
+ }
+ }
+}
+
+
+void MacroAssembler::SmiSubConstant(Register dst,
+ Register src,
+ Smi* constant,
+ Label* on_not_smi_result) {
+ if (constant->value() == 0) {
+ if (!dst.is(src)) {
+ movq(dst, src);
+ }
+ } else if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+
+ Move(kScratchRegister, constant);
+ subq(dst, kScratchRegister);
+ Label sub_success;
+ j(no_overflow, &sub_success);
+ addq(src, kScratchRegister);
+ jmp(on_not_smi_result);
+ bind(&sub_success);
+ } else {
+ if (constant->value() == Smi::kMinValue) {
+ Move(kScratchRegister, constant);
+ movq(dst, src);
+ subq(dst, kScratchRegister);
+ j(overflow, on_not_smi_result);
+ } else {
+ Move(dst, Smi::FromInt(-(constant->value())));
+ addq(dst, src);
+ j(overflow, on_not_smi_result);
+ }
+ }
+}
+
+
+void MacroAssembler::SmiDiv(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result) {
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src2.is(rax));
+ ASSERT(!src2.is(rdx));
+ ASSERT(!src1.is(rdx));
+
+ // Check for 0 divisor (result is +/-Infinity).
+ Label positive_divisor;
+ testq(src2, src2);
+ j(zero, on_not_smi_result);
+
+ if (src1.is(rax)) {
+ movq(kScratchRegister, src1);
+ }
+ SmiToInteger32(rax, src1);
+ // We need to rule out dividing Smi::kMinValue by -1, since that would
+ // overflow in idiv and raise an exception.
+ // We combine this with negative zero test (negative zero only happens
+ // when dividing zero by a negative number).
+
+ // We overshoot a little and go to slow case if we divide min-value
+ // by any negative value, not just -1.
+ Label safe_div;
+ testl(rax, Immediate(0x7fffffff));
+ j(not_zero, &safe_div);
+ testq(src2, src2);
+ if (src1.is(rax)) {
+ j(positive, &safe_div);
+ movq(src1, kScratchRegister);
+ jmp(on_not_smi_result);
+ } else {
+ j(negative, on_not_smi_result);
+ }
+ bind(&safe_div);
+
+ SmiToInteger32(src2, src2);
+ // Sign extend src1 into edx:eax.
+ cdq();
+ idivl(src2);
+ Integer32ToSmi(src2, src2);
+ // Check that the remainder is zero.
+ testl(rdx, rdx);
+ if (src1.is(rax)) {
+ Label smi_result;
+ j(zero, &smi_result);
+ movq(src1, kScratchRegister);
+ jmp(on_not_smi_result);
+ bind(&smi_result);
+ } else {
+ j(not_zero, on_not_smi_result);
+ }
+ if (!dst.is(src1) && src1.is(rax)) {
+ movq(src1, kScratchRegister);
+ }
+ Integer32ToSmi(dst, rax);
+}
+
+
+void MacroAssembler::SmiMod(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result) {
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!src2.is(rax));
+ ASSERT(!src2.is(rdx));
+ ASSERT(!src1.is(rdx));
+ ASSERT(!src1.is(src2));
+
+ testq(src2, src2);
+ j(zero, on_not_smi_result);
+
+ if (src1.is(rax)) {
+ movq(kScratchRegister, src1);
+ }
+ SmiToInteger32(rax, src1);
+ SmiToInteger32(src2, src2);
+
+ // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
+ Label safe_div;
+ cmpl(rax, Immediate(Smi::kMinValue));
+ j(not_equal, &safe_div);
+ cmpl(src2, Immediate(-1));
+ j(not_equal, &safe_div);
+ // Retag inputs and go slow case.
+ Integer32ToSmi(src2, src2);
+ if (src1.is(rax)) {
+ movq(src1, kScratchRegister);
+ }
+ jmp(on_not_smi_result);
+ bind(&safe_div);
+
+ // Sign extend eax into edx:eax.
+ cdq();
+ idivl(src2);
+ // Restore smi tags on inputs.
+ Integer32ToSmi(src2, src2);
+ if (src1.is(rax)) {
+ movq(src1, kScratchRegister);
+ }
+ // Check for a negative zero result. If the result is zero, and the
+ // dividend is negative, go slow to return a floating point negative zero.
+ Label smi_result;
+ testl(rdx, rdx);
+ j(not_zero, &smi_result);
+ testq(src1, src1);
+ j(negative, on_not_smi_result);
+ bind(&smi_result);
+ Integer32ToSmi(dst, rdx);
+}
+
+
+void MacroAssembler::SmiNot(Register dst, Register src) {
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src.is(kScratchRegister));
+ // Set tag and padding bits before negating, so that they are zero afterwards.
+ movl(kScratchRegister, Immediate(~0));
+ if (dst.is(src)) {
+ xor_(dst, kScratchRegister);
+ } else {
+ lea(dst, Operand(src, kScratchRegister, times_1, 0));
+ }
+ not_(dst);
+}
+
+
+void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
+ ASSERT(!dst.is(src2));
+ if (!dst.is(src1)) {
+ movq(dst, src1);
+ }
+ and_(dst, src2);
+}
+
+
+// dst = src & constant, where constant is a tagged smi.
+void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
+ if (constant->value() == 0) {
+ // AND with smi zero is zero, which is itself the smi 0.
+ xor_(dst, dst);
+ } else if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+ // Materialize the 64-bit tagged constant in the scratch register since
+ // the AND destination is occupied by src.
+ Move(kScratchRegister, constant);
+ and_(dst, kScratchRegister);
+ } else {
+ // Load the constant directly into dst and combine with src.
+ Move(dst, constant);
+ and_(dst, src);
+ }
+}
+
+
+// dst = src1 | src2. The OR of two smis is always a valid smi.
+// NOTE(review): unlike SmiAnd there is no ASSERT(!dst.is(src2)); if dst
+// aliased src2 (and not src1) the copy below would clobber src2 before the
+// OR -- presumably callers never pass that combination; confirm.
+void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
+ if (!dst.is(src1)) {
+ movq(dst, src1);
+ }
+ or_(dst, src2);
+}
+
+
+// dst = src | constant, where constant is a tagged smi.
+void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
+ if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+ // The destination is occupied by src, so stage the 64-bit tagged
+ // constant in the scratch register.
+ Move(kScratchRegister, constant);
+ or_(dst, kScratchRegister);
+ } else {
+ Move(dst, constant);
+ or_(dst, src);
+ }
+}
+
+
+// dst = src1 ^ src2. The XOR of two smis is always a valid smi.
+// NOTE(review): as with SmiOr there is no ASSERT(!dst.is(src2)); dst
+// aliasing src2 (and not src1) would clobber src2 before the XOR --
+// presumably that combination never occurs; confirm.
+void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
+ if (!dst.is(src1)) {
+ movq(dst, src1);
+ }
+ xor_(dst, src2);
+}
+
+
+// dst = src ^ constant, where constant is a tagged smi.
+void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
+ if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+ // Destination occupied by src: stage the tagged constant in scratch.
+ Move(kScratchRegister, constant);
+ xor_(dst, kScratchRegister);
+ } else {
+ Move(dst, constant);
+ xor_(dst, src);
+ }
+}
+
+
+// Arithmetic right shift of a smi by a constant amount, producing a tagged
+// smi in dst. An arithmetic shift can never leave the smi range, so there
+// is no failure label. shift_value == 0 is a no-op (the smi is already in
+// canonical form).
+void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
+ Register src,
+ int shift_value) {
+ ASSERT(is_uint5(shift_value));
+ if (shift_value > 0) {
+ if (dst.is(src)) {
+ // Shift the value (stored above the tag bits) down by kSmiShift plus
+ // the requested amount, then shift back up to re-tag; sar preserves
+ // the sign throughout.
+ sar(dst, Immediate(shift_value + kSmiShift));
+ shl(dst, Immediate(kSmiShift));
+ } else {
+ UNIMPLEMENTED(); // Not used.
+ }
+ }
+}
+
+
+// Logical (unsigned) right shift of a smi by a constant amount. Jumps to
+// on_not_smi_result when the result is not representable as a smi, which is
+// only possible for shift_value == 0 with a negative input (any non-zero
+// shift of a 32-bit value clears the sign bit).
+void MacroAssembler::SmiShiftLogicalRightConstant(Register dst,
+ Register src,
+ int shift_value,
+ Label* on_not_smi_result) {
+ // Logic right shift interprets its result as an *unsigned* number.
+ if (dst.is(src)) {
+ UNIMPLEMENTED(); // Not used.
+ } else {
+ movq(dst, src);
+ if (shift_value == 0) {
+ // A zero shift keeps the sign bit; a negative smi viewed as unsigned
+ // lies outside the smi range, so take the slow path.
+ testq(dst, dst);
+ j(negative, on_not_smi_result);
+ }
+ // Shift the value down past the tag, then re-tag.
+ shr(dst, Immediate(shift_value + kSmiShift));
+ shl(dst, Immediate(kSmiShift));
+ }
+}
+
+
+// Left shift of a smi by a constant amount, producing a tagged smi.
+// NOTE(review): on_not_smi_result is never used. Shifting the whole tagged
+// register truncates the 32-bit value modulo 2^32 while keeping the low
+// (tag) bits zero, so presumably no overflow bailout is needed and the
+// parameter exists for interface parity with the short-smi version --
+// confirm.
+void MacroAssembler::SmiShiftLeftConstant(Register dst,
+ Register src,
+ int shift_value,
+ Label* on_not_smi_result) {
+ if (!dst.is(src)) {
+ movq(dst, src);
+ }
+ if (shift_value > 0) {
+ shl(dst, Immediate(shift_value));
+ }
+}
+
+
+// dst = src1 << src2, all operands tagged smis. Clobbers rcx, which is
+// left holding the untagged, masked shift count.
+// NOTE(review): the local Label result_ok and the on_not_smi_result
+// parameter are unused -- as in SmiShiftLeftConstant the shift truncates
+// modulo 2^32, so presumably no bailout is required; confirm this is
+// intentional.
+void MacroAssembler::SmiShiftLeft(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result) {
+ ASSERT(!dst.is(rcx));
+ Label result_ok;
+ // Untag shift amount.
+ if (!dst.is(src1)) {
+ movq(dst, src1);
+ }
+ SmiToInteger32(rcx, src2);
+ // Shift amount specified by lower 5 bits, not six as the shl opcode.
+ and_(rcx, Immediate(0x1f));
+ shl(dst);
+}
+
+
+// dst = src1 >>> src2 (logical shift), all operands tagged smis. Jumps to
+// on_not_smi_result when the (unsigned) result is not a valid smi, first
+// restoring any operand that was passed in rcx.
+void MacroAssembler::SmiShiftLogicalRight(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smi_result) {
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!dst.is(rcx));
+ Label result_ok;
+ // The shift count lives in rcx; save an operand held there first.
+ if (src1.is(rcx) || src2.is(rcx)) {
+ movq(kScratchRegister, rcx);
+ }
+ if (!dst.is(src1)) {
+ movq(dst, src1);
+ }
+ SmiToInteger32(rcx, src2);
+ // Setting bit 5 of the count makes the hardware shift (taken mod 64) equal
+ // to 32 + (count & 0x1f): one shr both untags dst and shifts the value.
+ orl(rcx, Immediate(kSmiShift));
+ shr(dst); // Shift is rcx modulo 0x1f + 32.
+ shl(dst, Immediate(kSmiShift));
+ // A set sign bit is only possible for a zero shift of a negative smi,
+ // whose unsigned value has no smi representation.
+ testq(dst, dst);
+ if (src1.is(rcx) || src2.is(rcx)) {
+ Label positive_result;
+ j(positive, &positive_result);
+ // Restore the operand that lived in rcx before taking the slow path.
+ if (src1.is(rcx)) {
+ movq(src1, kScratchRegister);
+ } else {
+ movq(src2, kScratchRegister);
+ }
+ jmp(on_not_smi_result);
+ bind(&positive_result);
+ } else {
+ j(negative, on_not_smi_result); // src2 was zero and src1 negative.
+ }
+}
+
+
+// dst = src1 >> src2 (arithmetic shift), all operands tagged smis. An
+// arithmetic right shift always yields a valid smi, so there is no failure
+// label. Any operand passed in rcx is restored after the shift clobbers it.
+void MacroAssembler::SmiShiftArithmeticRight(Register dst,
+ Register src1,
+ Register src2) {
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!dst.is(rcx));
+ // Save whichever operand occupies rcx before it is used for the count.
+ if (src1.is(rcx)) {
+ movq(kScratchRegister, src1);
+ } else if (src2.is(rcx)) {
+ movq(kScratchRegister, src2);
+ }
+ if (!dst.is(src1)) {
+ movq(dst, src1);
+ }
+ SmiToInteger32(rcx, src2);
+ // Setting bit 5 of the count makes the hardware shift (taken mod 64) equal
+ // to 32 + (count & 0x1f): one sar both untags dst and shifts the value.
+ orl(rcx, Immediate(kSmiShift));
+ sar(dst); // Shift 32 + original rcx & 0x1f.
+ shl(dst, Immediate(kSmiShift));
+ // Put the saved operand back into rcx.
+ if (src1.is(rcx)) {
+ movq(src1, kScratchRegister);
+ } else if (src2.is(rcx)) {
+ movq(src2, kScratchRegister);
+ }
+}
+
+
+// Moves the non-smi operand of {src1, src2} into dst without branching on
+// which one it is; exactly one operand must be a heap object. Jumps to
+// on_not_smis if neither operand is a smi.
+void MacroAssembler::SelectNonSmi(Register dst,
+ Register src1,
+ Register src2,
+ Label* on_not_smis) {
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!dst.is(src1));
+ ASSERT(!dst.is(src2));
+ // Both operands must not be smis.
+#ifdef DEBUG
+ if (allow_stub_calls()) { // Check contains a stub call.
+ Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
+ Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
+ }
+#endif
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT_EQ(0, Smi::FromInt(0));
+ movl(kScratchRegister, Immediate(kSmiTagMask));
+ and_(kScratchRegister, src1);
+ testl(kScratchRegister, src2);
+ // Since kSmiTag is 0, the AND of the tag bits is non-zero only when both
+ // tag bits are set, i.e. when neither operand is a smi.
+ j(not_zero, on_not_smis);
+
+ // Exactly one operand is a smi.
+ ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
+ // kScratchRegister still holds src1 & kSmiTagMask, i.e. zero or one.
+ subq(kScratchRegister, Immediate(1));
+ // If src1 is a smi, then scratch register all 1s, else it is all 0s.
+ movq(dst, src1);
+ xor_(dst, src2);
+ and_(dst, kScratchRegister);
+ // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
+ xor_(dst, src1);
+ // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
+}
+
+SmiIndex MacroAssembler::SmiToIndex(Register dst,
+ Register src,
+ int shift) {
+ ASSERT(is_uint6(shift));
+ // There is a possible optimization if shift is in the range 60-63, but that
+ // will (and must) never happen.
+ if (!dst.is(src)) {
+ movq(dst, src);
+ }
+ if (shift < kSmiShift) {
+ sar(dst, Immediate(kSmiShift - shift));
} else {
- movq(dst, x, RelocInfo::NONE);
+ shl(dst, Immediate(shift - kSmiShift));
}
+ return SmiIndex(dst, times_1);
}
-
-void MacroAssembler::Set(const Operand& dst, int64_t x) {
- if (x == 0) {
- xor_(kScratchRegister, kScratchRegister);
- movq(dst, kScratchRegister);
- } else if (is_int32(x)) {
- movq(dst, Immediate(x));
- } else if (is_uint32(x)) {
- movl(dst, Immediate(x));
+SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
+ Register src,
+ int shift) {
+ // Register src holds a positive smi.
+ ASSERT(is_uint6(shift));
+ if (!dst.is(src)) {
+ movq(dst, src);
+ }
+ neg(dst);
+ if (shift < kSmiShift) {
+ sar(dst, Immediate(kSmiShift - shift));
} else {
- movq(kScratchRegister, x, RelocInfo::NONE);
- movq(dst, kScratchRegister);
+ shl(dst, Immediate(shift - kSmiShift));
}
+ return SmiIndex(dst, times_1);
}
+#else // ! V8_LONG_SMI
+// 31 bit smi operations
-// ----------------------------------------------------------------------------
-// Smi tagging, untagging and tag detection.
+// Extracts the low 32 bits of a Smi pointer, where the taqgged smi value
+// is stored.
+static int32_t SmiValue(Smi* smi) {
+ return static_cast<int32_t>(reinterpret_cast<intptr_t>(smi));
+}
void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
ASSERT_EQ(1, kSmiTagSize);
ASSERT_EQ(0, kSmiTag);
#ifdef DEBUG
- cmpq(src, Immediate(0xC0000000u));
+ if (allow_stub_calls()) {
+ cmpl(src, Immediate(0xC0000000u));
Check(positive, "Smi conversion overflow");
+ }
#endif
if (dst.is(src)) {
addl(dst, src);
}
-void MacroAssembler::Integer64AddToSmi(Register dst,
- Register src,
- int constant) {
+void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
+ Register src,
+ int constant) {
#ifdef DEBUG
- movl(kScratchRegister, src);
- addl(kScratchRegister, Immediate(constant));
- Check(no_overflow, "Add-and-smi-convert overflow");
- Condition valid = CheckInteger32ValidSmiValue(kScratchRegister);
- Check(valid, "Add-and-smi-convert overflow");
+ if (allow_stub_calls()) {
+ movl(kScratchRegister, src);
+ addl(kScratchRegister, Immediate(constant));
+ Check(no_overflow, "Add-and-smi-convert overflow");
+ Condition valid = CheckInteger32ValidSmiValue(kScratchRegister);
+ Check(valid, "Add-and-smi-convert overflow");
+ }
#endif
lea(dst, Operand(src, src, times_1, constant << kSmiTagSize));
}
}
-void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
- Register src,
- int power) {
- ASSERT(power >= 0);
- ASSERT(power < 64);
- if (power == 0) {
- SmiToInteger64(dst, src);
- return;
- }
- movsxlq(dst, src);
- shl(dst, Immediate(power - 1));
-}
-
-void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) {
- ASSERT_EQ(0, kSmiTag);
- testl(src, Immediate(kSmiTagMask));
- j(zero, on_smi);
-}
-
-
-void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi) {
- Condition not_smi = CheckNotSmi(src);
- j(not_smi, on_not_smi);
+void MacroAssembler::SmiTest(Register src) {
+ testl(src, src);
}
-void MacroAssembler::JumpIfNotPositiveSmi(Register src,
- Label* on_not_positive_smi) {
- Condition not_positive_smi = CheckNotPositiveSmi(src);
- j(not_positive_smi, on_not_positive_smi);
+void MacroAssembler::SmiCompare(Register dst, Register src) {
+ cmpl(dst, src);
}
-void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
- int constant,
- Label* on_equals) {
- if (Smi::IsValid(constant)) {
- Condition are_equal = CheckSmiEqualsConstant(src, constant);
- j(are_equal, on_equals);
+void MacroAssembler::SmiCompare(Register dst, Smi* src) {
+ ASSERT(!dst.is(kScratchRegister));
+ if (src->value() == 0) {
+ testl(dst, dst);
+ } else {
+ cmpl(dst, Immediate(SmiValue(src)));
}
}
-void MacroAssembler::JumpIfSmiGreaterEqualsConstant(Register src,
- int constant,
- Label* on_greater_equals) {
- if (Smi::IsValid(constant)) {
- Condition are_greater_equal = CheckSmiGreaterEqualsConstant(src, constant);
- j(are_greater_equal, on_greater_equals);
- } else if (constant < Smi::kMinValue) {
- jmp(on_greater_equals);
- }
+void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
+ cmpl(dst, src);
}
-void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) {
- Condition is_valid = CheckInteger32ValidSmiValue(src);
- j(ReverseCondition(is_valid), on_invalid);
+void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
+ if (src->value() == 0) {
+ movl(kScratchRegister, dst);
+ testl(kScratchRegister, kScratchRegister);
+ } else {
+ cmpl(dst, Immediate(SmiValue(src)));
+ }
}
-
-void MacroAssembler::JumpIfNotBothSmi(Register src1,
- Register src2,
- Label* on_not_both_smi) {
- Condition not_both_smi = CheckNotBothSmi(src1, src2);
- j(not_both_smi, on_not_both_smi);
+void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
+ Register src,
+ int power) {
+ ASSERT(power >= 0);
+ ASSERT(power < 64);
+ if (power == 0) {
+ SmiToInteger64(dst, src);
+ return;
+ }
+ movsxlq(dst, src);
+ shl(dst, Immediate(power - 1));
}
Condition MacroAssembler::CheckSmi(Register src) {
return zero;
}
-
-Condition MacroAssembler::CheckNotSmi(Register src) {
- ASSERT_EQ(0, kSmiTag);
- testb(src, Immediate(kSmiTagMask));
- return not_zero;
-}
-
-
Condition MacroAssembler::CheckPositiveSmi(Register src) {
ASSERT_EQ(0, kSmiTag);
testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask)));
return zero;
}
-
-Condition MacroAssembler::CheckNotPositiveSmi(Register src) {
- ASSERT_EQ(0, kSmiTag);
- testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask)));
- return not_zero;
-}
-
-
Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
if (first.is(second)) {
return CheckSmi(first);
return CheckSmi(kScratchRegister);
}
-
-Condition MacroAssembler::CheckNotBothSmi(Register first, Register second) {
- ASSERT_EQ(0, kSmiTag);
- if (first.is(second)) {
- return CheckNotSmi(first);
- }
- movl(kScratchRegister, first);
- or_(kScratchRegister, second);
- return CheckNotSmi(kScratchRegister);
-}
-
-
Condition MacroAssembler::CheckIsMinSmi(Register src) {
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- cmpl(src, Immediate(0x40000000));
+ cmpl(src, Immediate(0x80000000u));
return equal;
}
-Condition MacroAssembler::CheckSmiEqualsConstant(Register src, int constant) {
- if (constant == 0) {
- testl(src, src);
- return zero;
- }
- if (Smi::IsValid(constant)) {
- cmpl(src, Immediate(Smi::FromInt(constant)));
- return zero;
- }
- // Can't be equal.
- UNREACHABLE();
- return no_condition;
-}
-
-
-Condition MacroAssembler::CheckSmiGreaterEqualsConstant(Register src,
- int constant) {
- if (constant == 0) {
- testl(src, Immediate(static_cast<uint32_t>(0x80000000u)));
- return positive;
- }
- if (Smi::IsValid(constant)) {
- cmpl(src, Immediate(Smi::FromInt(constant)));
- return greater_equal;
- }
- // Can't be equal.
- UNREACHABLE();
- return no_condition;
-}
-
-
Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
// A 32-bit integer value can be converted to a smi if it is in the
// range [-2^30 .. 2^30-1]. That is equivalent to having its 32-bit
void MacroAssembler::SmiNeg(Register dst,
Register src,
- Label* on_not_smi_result) {
+ Label* on_smi_result) {
if (!dst.is(src)) {
movl(dst, src);
}
negl(dst);
testl(dst, Immediate(0x7fffffff));
// If the result is zero or 0x80000000, negation failed to create a smi.
- j(equal, on_not_smi_result);
+ j(not_equal, on_smi_result);
}
}
-
void MacroAssembler::SmiSub(Register dst,
Register src1,
Register src2,
ASSERT(!dst.is(src2));
if (dst.is(src1)) {
+ // Copy src1 before overwriting.
movq(kScratchRegister, src1);
}
SmiToInteger32(dst, src1);
j(not_zero, &non_zero_result);
// Test whether either operand is negative (the other must be zero).
+ if (!dst.is(src1)) {
+ movl(kScratchRegister, src1);
+ }
orl(kScratchRegister, src2);
j(negative, on_not_smi_result);
+
bind(&non_zero_result);
}
void MacroAssembler::SmiTryAddConstant(Register dst,
Register src,
- int32_t constant,
+ Smi* constant,
Label* on_not_smi_result) {
// Does not assume that src is a smi.
ASSERT_EQ(1, kSmiTagMask);
ASSERT_EQ(0, kSmiTag);
- ASSERT(Smi::IsValid(constant));
Register tmp = (src.is(dst) ? kScratchRegister : dst);
movl(tmp, src);
- addl(tmp, Immediate(Smi::FromInt(constant)));
+ addl(tmp, Immediate(SmiValue(constant)));
if (tmp.is(kScratchRegister)) {
j(overflow, on_not_smi_result);
testl(tmp, Immediate(kSmiTagMask));
void MacroAssembler::SmiAddConstant(Register dst,
Register src,
- int32_t constant,
+ Smi* constant) {
+ ASSERT_EQ(1, kSmiTagMask);
+ ASSERT_EQ(0, kSmiTag);
+ int32_t smi_value = SmiValue(constant);
+ if (dst.is(src)) {
+ addl(dst, Immediate(smi_value));
+ } else {
+ lea(dst, Operand(src, smi_value));
+ }
+}
+
+
+void MacroAssembler::SmiAddConstant(Register dst,
+ Register src,
+ Smi* constant,
Label* on_not_smi_result) {
- ASSERT(Smi::IsValid(constant));
- if (on_not_smi_result == NULL) {
- if (dst.is(src)) {
- movl(dst, src);
- } else {
- lea(dst, Operand(src, constant << kSmiTagSize));
- }
+ ASSERT_EQ(1, kSmiTagMask);
+ ASSERT_EQ(0, kSmiTag);
+ int32_t smi_value = SmiValue(constant);
+ if (!dst.is(src)) {
+ movl(dst, src);
+ addl(dst, Immediate(smi_value));
+ j(overflow, on_not_smi_result);
} else {
- if (!dst.is(src)) {
- movl(dst, src);
- }
- addl(dst, Immediate(Smi::FromInt(constant)));
- if (!dst.is(src)) {
- j(overflow, on_not_smi_result);
- } else {
- Label result_ok;
- j(no_overflow, &result_ok);
- subl(dst, Immediate(Smi::FromInt(constant)));
- jmp(on_not_smi_result);
- bind(&result_ok);
- }
+ addl(dst, Immediate(smi_value));
+ Label result_ok;
+ j(no_overflow, &result_ok);
+ subl(dst, Immediate(smi_value));
+ jmp(on_not_smi_result);
+ bind(&result_ok);
+ }
+}
+
+
+void MacroAssembler::SmiSubConstant(Register dst,
+ Register src,
+ Smi* constant) {
+ ASSERT_EQ(1, kSmiTagMask);
+ ASSERT_EQ(0, kSmiTag);
+ if (!dst.is(src)) {
+ movl(dst, src);
}
+ subl(dst, Immediate(SmiValue(constant)));
}
void MacroAssembler::SmiSubConstant(Register dst,
Register src,
- int32_t constant,
+ Smi* constant,
Label* on_not_smi_result) {
- ASSERT(Smi::IsValid(constant));
- Smi* smi_value = Smi::FromInt(constant);
+ ASSERT_EQ(1, kSmiTagMask);
+ ASSERT_EQ(0, kSmiTag);
+ int32_t smi_value = SmiValue(constant);
if (dst.is(src)) {
// Optimistic subtract - may change value of dst register,
// if it has garbage bits in the higher half, but will not change
// the value as a tagged smi.
subl(dst, Immediate(smi_value));
- if (on_not_smi_result != NULL) {
- Label add_success;
- j(no_overflow, &add_success);
- addl(dst, Immediate(smi_value));
- jmp(on_not_smi_result);
- bind(&add_success);
- }
+ Label add_success;
+ j(no_overflow, &add_success);
+ addl(dst, Immediate(smi_value));
+ jmp(on_not_smi_result);
+ bind(&add_success);
} else {
- UNIMPLEMENTED(); // Not used yet.
+ movl(dst, src);
+ subl(dst, Immediate(smi_value));
+ j(overflow, on_not_smi_result);
}
}
}
-void MacroAssembler::SmiAndConstant(Register dst, Register src, int constant) {
- ASSERT(Smi::IsValid(constant));
+void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
if (!dst.is(src)) {
movl(dst, src);
}
- and_(dst, Immediate(Smi::FromInt(constant)));
+ int32_t smi_value = SmiValue(constant);
+ and_(dst, Immediate(smi_value));
}
}
-void MacroAssembler::SmiOrConstant(Register dst, Register src, int constant) {
- ASSERT(Smi::IsValid(constant));
+void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
if (!dst.is(src)) {
movl(dst, src);
}
- or_(dst, Immediate(Smi::FromInt(constant)));
+ int32_t smi_value = SmiValue(constant);
+ or_(dst, Immediate(smi_value));
}
+
void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
movl(dst, src1);
}
-void MacroAssembler::SmiXorConstant(Register dst, Register src, int constant) {
- ASSERT(Smi::IsValid(constant));
+void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
if (!dst.is(src)) {
movl(dst, src);
}
- xor_(dst, Immediate(Smi::FromInt(constant)));
+ int32_t smi_value = SmiValue(constant);
+ xor_(dst, Immediate(smi_value));
}
-
void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
Register src,
int shift_value) {
int shift_value,
Label* on_not_smi_result) {
if (dst.is(src)) {
- UNIMPLEMENTED(); // Not used.
+ if (shift_value > 0) {
+ movq(kScratchRegister, src);
+ // Treat scratch as an untagged integer value equal to two times the
+ // smi value of src, i.e., already shifted left by one.
+ if (shift_value > 1) {
+ shll(kScratchRegister, Immediate(shift_value - 1));
+ }
+ JumpIfNotValidSmiValue(kScratchRegister, on_not_smi_result);
+ // Convert int result to Smi, checking that it is in smi range.
+ ASSERT(kSmiTagSize == 1); // adjust code if not the case
+ Integer32ToSmi(dst, kScratchRegister);
+ }
} else {
movl(dst, src);
if (shift_value > 0) {
ASSERT(!dst.is(rcx));
Label result_ok;
// Untag both operands.
+ if (dst.is(src1) || src1.is(rcx)) {
+ movq(kScratchRegister, src1);
+ }
SmiToInteger32(dst, src1);
SmiToInteger32(rcx, src2);
shll(dst);
j(is_valid, &result_ok);
// Restore the relevant bits of the source registers
// and call the slow version.
- if (dst.is(src1)) {
- shrl(dst);
- Integer32ToSmi(dst, dst);
+ if (dst.is(src1) || src1.is(rcx)) {
+ movq(src1, kScratchRegister);
+ }
+ if (src2.is(rcx)) {
+ Integer32ToSmi(rcx, rcx);
}
- Integer32ToSmi(rcx, rcx);
jmp(on_not_smi_result);
bind(&result_ok);
Integer32ToSmi(dst, dst);
Register src1,
Register src2,
Label* on_not_smi_result) {
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
ASSERT(!dst.is(rcx));
Label result_ok;
// Untag both operands.
+ if (src1.is(rcx)) {
+ movq(kScratchRegister, src1);
+ }
SmiToInteger32(dst, src1);
SmiToInteger32(rcx, src2);
if (dst.is(src1)) {
shll(dst);
Integer32ToSmi(dst, dst);
+ } else if (src1.is(rcx)) {
+ movq(rcx, kScratchRegister);
+ } else if (src2.is(rcx)) {
+ Integer32ToSmi(src2, src2);
}
- Integer32ToSmi(rcx, rcx);
jmp(on_not_smi_result);
bind(&result_ok);
// Smi-tag the result in answer.
ASSERT(!dst.is(src2));
// Both operands must not be smis.
#ifdef DEBUG
- Condition not_both_smis = CheckNotBothSmi(src1, src2);
- Check(not_both_smis, "Both registers were smis.");
+ if (allow_stub_calls()) {
+ Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
+ Check(not_both_smis, "Both registers were smis.");
+ }
#endif
ASSERT_EQ(0, kSmiTag);
ASSERT_EQ(0, Smi::FromInt(0));
return SmiIndex(dst, times_1);
}
+#endif // V8_LONG_SMI
+
+
+// Jumps to on_smi when src holds a tagged smi.
+void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) {
+ ASSERT_EQ(0, kSmiTag);
+ Condition smi = CheckSmi(src);
+ j(smi, on_smi);
+}
+
+
+// Jumps to on_not_smi when src does not hold a tagged smi.
+void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi) {
+ Condition smi = CheckSmi(src);
+ j(NegateCondition(smi), on_not_smi);
+}
+
+
+// Jumps to on_not_positive_smi unless src holds a non-negative tagged smi.
+void MacroAssembler::JumpIfNotPositiveSmi(Register src,
+ Label* on_not_positive_smi) {
+ Condition positive_smi = CheckPositiveSmi(src);
+ j(NegateCondition(positive_smi), on_not_positive_smi);
+}
+
+
+// Jumps to on_equals when the value in src (which must already be a tagged
+// smi) equals the given smi constant.
+void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
+ Smi* constant,
+ Label* on_equals) {
+ SmiCompare(src, constant);
+ j(equal, on_equals);
+}
+
+
+// Jumps to on_invalid when the 32-bit integer in src cannot be represented
+// as a smi.
+void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) {
+ Condition is_valid = CheckInteger32ValidSmiValue(src);
+ j(NegateCondition(is_valid), on_invalid);
+}
+
+// Jumps to on_not_both_smi unless both src1 and src2 hold tagged smis.
+void MacroAssembler::JumpIfNotBothSmi(Register src1, Register src2,
+ Label* on_not_both_smi) {
+ Condition both_smi = CheckBothSmi(src1, src2);
+ j(NegateCondition(both_smi), on_not_both_smi);
+}
bool MacroAssembler::IsUnsafeSmi(Smi* value) {
return false;
}
+
void MacroAssembler::LoadUnsafeSmi(Register dst, Smi* source) {
UNIMPLEMENTED();
}
+// Loads the tagged smi constant into dst. Unsafe smis (values that must not
+// appear verbatim in the instruction stream) go through LoadUnsafeSmi;
+// otherwise the 64-bit tagged bit pattern is loaded directly.
+void MacroAssembler::Move(Register dst, Smi* source) {
+ if (IsUnsafeSmi(source)) {
+ LoadUnsafeSmi(dst, source);
+ } else {
+ Set(dst, reinterpret_cast<int64_t>(source));
+ }
+}
+
+
+// Stores the tagged smi constant into the memory operand dst. The unsafe
+// path stages the value in kScratchRegister since it cannot be encoded as
+// an immediate store.
+void MacroAssembler::Move(const Operand& dst, Smi* source) {
+ if (IsUnsafeSmi(source)) {
+ LoadUnsafeSmi(kScratchRegister, source);
+ movq(dst, kScratchRegister);
+ } else {
+ Set(dst, reinterpret_cast<int64_t>(source));
+ }
+}
+
+
void MacroAssembler::Move(Register dst, Handle<Object> source) {
ASSERT(!source->IsFailure());
if (source->IsSmi()) {
- if (IsUnsafeSmi(source)) {
- LoadUnsafeSmi(dst, source);
- } else {
- int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source));
- movq(dst, Immediate(smi));
- }
+ Move(dst, Smi::cast(*source));
} else {
movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
}
void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
+ ASSERT(!source->IsFailure());
if (source->IsSmi()) {
- int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source));
- movq(dst, Immediate(smi));
+ Move(dst, Smi::cast(*source));
} else {
movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
movq(dst, kScratchRegister);
void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
- Move(kScratchRegister, source);
- cmpq(dst, kScratchRegister);
+ if (source->IsSmi()) {
+ SmiCompare(dst, Smi::cast(*source));
+ } else {
+ Move(kScratchRegister, source);
+ cmpq(dst, kScratchRegister);
+ }
}
void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
if (source->IsSmi()) {
- if (IsUnsafeSmi(source)) {
- LoadUnsafeSmi(kScratchRegister, source);
- cmpl(dst, kScratchRegister);
- } else {
- // For smi-comparison, it suffices to compare the low 32 bits.
- int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source));
- cmpl(dst, Immediate(smi));
- }
+ SmiCompare(dst, Smi::cast(*source));
} else {
ASSERT(source->IsHeapObject());
movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
void MacroAssembler::Push(Handle<Object> source) {
if (source->IsSmi()) {
- if (IsUnsafeSmi(source)) {
- LoadUnsafeSmi(kScratchRegister, source);
- push(kScratchRegister);
- } else {
- int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source));
- push(Immediate(smi));
- }
+ Push(Smi::cast(*source));
} else {
ASSERT(source->IsHeapObject());
movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
LoadUnsafeSmi(kScratchRegister, source);
push(kScratchRegister);
} else {
- int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(source));
- push(Immediate(smi));
+ intptr_t smi = reinterpret_cast<intptr_t>(source);
+ if (is_int32(smi)) {
+ push(Immediate(static_cast<int32_t>(smi)));
+ } else {
+ Set(kScratchRegister, smi);
+ push(kScratchRegister);
+ }
+ }
+}
+
+
+void MacroAssembler::Test(const Operand& src, Smi* source) {
+ if (IsUnsafeSmi(source)) {
+ LoadUnsafeSmi(kScratchRegister, source);
+ testq(src, kScratchRegister);
+ } else {
+ intptr_t smi = reinterpret_cast<intptr_t>(source);
+ if (is_int32(smi)) {
+ testl(src, Immediate(static_cast<int32_t>(smi)));
+ } else {
+ Move(kScratchRegister, source);
+ testq(src, kScratchRegister);
+ }
}
}
}
}
-
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::PushRegistersFromMemory(RegList regs) {
}
}
+
void MacroAssembler::SaveRegistersToMemory(RegList regs) {
ASSERT((regs & ~kJSCallerSaved) == 0);
// Copy the content of registers to memory location.
// arguments match the expected number of arguments. Fake a
// parameter count to avoid emitting code to do the check.
ParameterCount expected(0);
- InvokeCode(Handle<Code>(code), expected, expected,
- RelocInfo::CODE_TARGET, flag);
+ InvokeCode(Handle<Code>(code),
+ expected,
+ expected,
+ RelocInfo::CODE_TARGET,
+ flag);
const char* name = Builtins::GetName(id);
int argc = Builtins::GetArgumentsCount(id);
} else {
movq(rax, Immediate(actual.immediate()));
if (expected.immediate() ==
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
// Don't worry about adapting arguments for built-ins that
// don't want that done. Skip adaption code by making it look
// like we have a match between expected and actual number of
push(rbp);
movq(rbp, rsp);
push(rsi); // Context.
- push(Immediate(Smi::FromInt(type)));
+ Push(Smi::FromInt(type));
movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
push(kScratchRegister);
if (FLAG_debug_code) {
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
if (FLAG_debug_code) {
- movq(kScratchRegister, Immediate(Smi::FromInt(type)));
+ Move(kScratchRegister, Smi::FromInt(type));
cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
Check(equal, "stack frame types must match");
}
}
-
void MacroAssembler::EnterExitFrame(StackFrame::Type type, int result_size) {
ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
movq(rbp, rsp);
// Reserve room for entry stack pointer and push the debug marker.
- ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
+ ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
push(Immediate(0)); // saved entry sp, patched before call
push(Immediate(type == StackFrame::EXIT_DEBUG ? 1 : 0));
}
-Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
- JSObject* holder, Register holder_reg,
+Register MacroAssembler::CheckMaps(JSObject* object,
+ Register object_reg,
+ JSObject* holder,
+ Register holder_reg,
Register scratch,
Label* miss) {
// Make sure there's no overlap between scratch and the other
}
// Check the holder map.
- Cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Handle<Map>(holder->map()));
+ Cmp(FieldOperand(reg, HeapObject::kMapOffset), Handle<Map>(holder->map()));
j(not_equal, miss);
// Log the check depth.
}
-
-
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Register scratch,
Label* miss) {
movq(kScratchRegister,
FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
- int token_offset = Context::kHeaderSize +
- Context::SECURITY_TOKEN_INDEX * kPointerSize;
+ int token_offset =
+ Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
movq(scratch, FieldOperand(scratch, token_offset));
cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
j(not_equal, miss);
ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
-
} } // namespace v8::internal
Register value,
Register scratch);
+ // Set the remembered set bit for [object+offset].
+ // The value is known to not be a smi.
+ // object is the object being stored into, value is the object being stored.
+ // If offset is zero, then the scratch register contains the array index into
+ // the elements array represented as a Smi.
+ // All registers are clobbered by the operation.
+ void RecordWriteNonSmi(Register object,
+ int offset,
+ Register value,
+ Register scratch);
+
+
#ifdef ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
// Debugger Support
// Tag an integer value if possible, or jump the integer value cannot be
// represented as a smi. Only uses the low 32 bit of the src registers.
+ // NOTICE: Destroys the dst register even if unsuccessful!
void Integer32ToSmi(Register dst, Register src, Label* on_overflow);
// Adds constant to src and tags the result as a smi.
// Result must be a valid smi.
- void Integer64AddToSmi(Register dst, Register src, int constant);
+ void Integer64PlusConstantToSmi(Register dst, Register src, int constant);
// Convert smi to 32-bit integer. I.e., not sign extended into
// high 32 bits of destination.
Register src,
int power);
+ // Simple comparison of smis.
+ void SmiCompare(Register dst, Register src);
+ void SmiCompare(Register dst, Smi* src);
+ void SmiCompare(const Operand& dst, Register src);
+ void SmiCompare(const Operand& dst, Smi* src);
+ // Sets sign and zero flags depending on value of smi in register.
+ void SmiTest(Register src);
+
// Functions performing a check on a known or potential smi. Returns
// a condition that is satisfied if the check is successful.
// Is the value a tagged smi.
Condition CheckSmi(Register src);
- // Is the value not a tagged smi.
- Condition CheckNotSmi(Register src);
-
// Is the value a positive tagged smi.
Condition CheckPositiveSmi(Register src);
- // Is the value not a positive tagged smi.
- Condition CheckNotPositiveSmi(Register src);
-
// Are both values are tagged smis.
Condition CheckBothSmi(Register first, Register second);
- // Is one of the values not a tagged smi.
- Condition CheckNotBothSmi(Register first, Register second);
-
// Is the value the minimum smi value (since we are using
// two's complement numbers, negating the value is known to yield
// a non-smi value).
Condition CheckIsMinSmi(Register src);
- // Check whether a tagged smi is equal to a constant.
- Condition CheckSmiEqualsConstant(Register src, int constant);
-
- // Check whether a tagged smi is greater than or equal to a constant.
- Condition CheckSmiGreaterEqualsConstant(Register src, int constant);
-
// Checks whether an 32-bit integer value is a valid for conversion
// to a smi.
Condition CheckInteger32ValidSmiValue(Register src);
// Jump to label if the value is not a positive tagged smi.
void JumpIfNotPositiveSmi(Register src, Label* on_not_smi);
- // Jump to label if the value is a tagged smi with value equal
+ // Jump to label if the value, which must be a tagged smi, has value equal
// to the constant.
- void JumpIfSmiEqualsConstant(Register src, int constant, Label* on_equals);
-
- // Jump to label if the value is a tagged smi with value greater than or equal
- // to the constant.
- void JumpIfSmiGreaterEqualsConstant(Register src,
- int constant,
- Label* on_equals);
+ void JumpIfSmiEqualsConstant(Register src, Smi* constant, Label* on_equals);
// Jump if either or both register are not smi values.
void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi);
// the label.
void SmiTryAddConstant(Register dst,
Register src,
- int32_t constant,
+ Smi* constant,
Label* on_not_smi_result);
+ // Add an integer constant to a tagged smi, giving a tagged smi as result.
+ // No overflow testing on the result is done.
+ void SmiAddConstant(Register dst, Register src, Smi* constant);
+
// Add an integer constant to a tagged smi, giving a tagged smi as result,
// or jumping to a label if the result cannot be represented by a smi.
- // If the label is NULL, no testing on the result is done.
void SmiAddConstant(Register dst,
Register src,
- int32_t constant,
+ Smi* constant,
Label* on_not_smi_result);
// Subtract an integer constant from a tagged smi, giving a tagged smi as
+ // result. No testing on the result is done.
+ void SmiSubConstant(Register dst, Register src, Smi* constant);
+
+ // Subtract an integer constant from a tagged smi, giving a tagged smi as
// result, or jumping to a label if the result cannot be represented by a smi.
- // If the label is NULL, no testing on the result is done.
void SmiSubConstant(Register dst,
Register src,
- int32_t constant,
+ Smi* constant,
Label* on_not_smi_result);
// Negating a smi can give a negative zero or too large positive value.
+ // NOTICE: This operation jumps on success, not failure!
void SmiNeg(Register dst,
Register src,
- Label* on_not_smi_result);
+ Label* on_smi_result);
// Adds smi values and return the result as a smi.
// If dst is src1, then src1 will be destroyed, even if
void SmiAnd(Register dst, Register src1, Register src2);
void SmiOr(Register dst, Register src1, Register src2);
void SmiXor(Register dst, Register src1, Register src2);
- void SmiAndConstant(Register dst, Register src1, int constant);
- void SmiOrConstant(Register dst, Register src1, int constant);
- void SmiXorConstant(Register dst, Register src1, int constant);
+ void SmiAndConstant(Register dst, Register src1, Smi* constant);
+ void SmiOrConstant(Register dst, Register src1, Smi* constant);
+ void SmiXorConstant(Register dst, Register src1, Smi* constant);
void SmiShiftLeftConstant(Register dst,
Register src,
// Converts a positive smi to a negative index.
SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
+ bool IsUnsafeSmi(Smi* value);
+ void LoadUnsafeSmi(Register dst, Smi* source);
+
+ // Basic Smi operations.
+ void Move(Register dst, Smi* source);
+ void Move(const Operand& dst, Smi* source);
+ void Push(Smi* smi);
+ void Test(const Operand& dst, Smi* source);
+
// ---------------------------------------------------------------------------
// Macro instructions
- // Expression support
+ // Load a register with a long value as efficiently as possible.
void Set(Register dst, int64_t x);
void Set(const Operand& dst, int64_t x);
// Handle support
- bool IsUnsafeSmi(Smi* value);
bool IsUnsafeSmi(Handle<Object> value) {
return IsUnsafeSmi(Smi::cast(*value));
}
- void LoadUnsafeSmi(Register dst, Smi* source);
void LoadUnsafeSmi(Register dst, Handle<Object> source) {
LoadUnsafeSmi(dst, Smi::cast(*source));
}
void Cmp(Register dst, Handle<Object> source);
void Cmp(const Operand& dst, Handle<Object> source);
void Push(Handle<Object> source);
- void Push(Smi* smi);
// Control Flow
void Jump(Address destination, RelocInfo::Mode rmode);
StubCache::Table table,
Register name,
Register offset) {
- // The offset register must hold a *positive* smi.
+ ASSERT_EQ(8, kPointerSize);
+ ASSERT_EQ(16, sizeof(StubCache::Entry));
+ // The offset register holds the entry offset times four (due to masking
+ // and shifting optimizations).
ExternalReference key_offset(SCTableReference::keyReference(table));
Label miss;
__ movq(kScratchRegister, key_offset);
- SmiIndex index = masm->SmiToIndex(offset, offset, kPointerSizeLog2);
// Check that the key in the entry matches the name.
- __ cmpl(name, Operand(kScratchRegister, index.reg, index.scale, 0));
+ // Multiply entry offset by 16 to get the entry address. Since the
+ // offset register already holds the entry offset times four, multiply
+ // by a further four.
+ __ cmpl(name, Operand(kScratchRegister, offset, times_4, 0));
__ j(not_equal, &miss);
// Get the code entry from the cache.
// Use key_offset + kPointerSize, rather than loading value_offset.
__ movq(kScratchRegister,
- Operand(kScratchRegister, index.reg, index.scale, kPointerSize));
+ Operand(kScratchRegister, offset, times_4, kPointerSize));
// Check that the flags match what we're looking for.
__ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
__ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
}
+void VirtualFrame::EmitPush(Smi* smi_value) {
+ ASSERT(stack_pointer_ == element_count() - 1);
+ elements_.Add(FrameElement::MemoryElement());
+ stack_pointer_++;
+ __ Push(smi_value);
+}
+
+
void VirtualFrame::EmitPush(Handle<Object> value) {
ASSERT(stack_pointer_ == element_count() - 1);
elements_.Add(FrameElement::MemoryElement());
switch (element.type()) {
case FrameElement::INVALID:
- __ push(Immediate(Smi::FromInt(0)));
+ __ Push(Smi::FromInt(0));
break;
case FrameElement::MEMORY:
function.ToRegister(rdi);
// Constructors are called with the number of arguments in register
- // eax for now. Another option would be to have separate construct
+ // rax for now. Another option would be to have separate construct
// call trampolines per different arguments counts encountered.
Result num_args = cgen()->allocator()->Allocate(rax);
ASSERT(num_args.is_valid());
void EmitPush(const Operand& operand);
void EmitPush(Heap::RootListIndex index);
void EmitPush(Immediate immediate);
+ void EmitPush(Smi* value);
// Uses kScratchRegister, emits appropriate relocation info.
void EmitPush(Handle<Object> value);
'test-disasm-ia32.cc',
'test-log-stack-tracer.cc'
],
- 'arch:x64': ['test-assembler-x64.cc', 'test-log-stack-tracer.cc'],
+ 'arch:x64': ['test-assembler-x64.cc',
+ 'test-macro-assembler-x64.cc',
+ 'test-log-stack-tracer.cc'],
'os:linux': ['test-platform-linux.cc'],
'os:macos': ['test-platform-macos.cc'],
'os:nullos': ['test-platform-nullos.cc'],
// Build huge string. This should fail with out of memory exception.
Local<Value> result = CompileRun(
"var str = Array.prototype.join.call({length: 513}, \"A\").toUpperCase();"
- "for (var i = 0; i < 21; i++) { str = str + str; }");
+ "for (var i = 0; i < 22; i++) { str = str + str; }");
// Check for out of memory state.
CHECK(result.IsEmpty());
CHECK(value->IsNumber());
CHECK_EQ(Smi::kMaxValue, Smi::cast(value)->value());
+#ifndef V8_LONG_SMI
+ // TODO(lrn): We need a NumberFromIntptr function in order to test this.
value = Heap::NumberFromInt32(Smi::kMinValue - 1);
CHECK(value->IsHeapNumber());
CHECK(value->IsNumber());
CHECK_EQ(static_cast<double>(Smi::kMinValue - 1), value->Number());
+#endif
- value = Heap::NumberFromInt32(Smi::kMaxValue + 1);
+ value = Heap::NumberFromUint32(static_cast<uint32_t>(Smi::kMaxValue) + 1);
CHECK(value->IsHeapNumber());
CHECK(value->IsNumber());
- CHECK_EQ(static_cast<double>(Smi::kMaxValue + 1), value->Number());
+ CHECK_EQ(static_cast<double>(static_cast<uint32_t>(Smi::kMaxValue) + 1),
+ value->Number());
// nan oddball checks
CHECK(Heap::nan_value()->IsNumber());
CHECK_EQ(Smi::FromInt(1), array->length());
CHECK_EQ(array->GetElement(0), name);
- // Set array length with larger than smi value.
- Object* length = Heap::NumberFromInt32(Smi::kMaxValue + 1);
+  // Set array length with larger than smi value.
+ Object* length =
+ Heap::NumberFromUint32(static_cast<uint32_t>(Smi::kMaxValue) + 1);
array->SetElementsLength(length);
uint32_t int_length = 0;
--- /dev/null
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "macro-assembler.h"
+#include "factory.h"
+#include "platform.h"
+#include "serialize.h"
+#include "cctest.h"
+
+using v8::internal::byte;
+using v8::internal::OS;
+using v8::internal::Assembler;
+using v8::internal::Condition;
+using v8::internal::MacroAssembler;
+using v8::internal::HandleScope;
+using v8::internal::Operand;
+using v8::internal::Immediate;
+using v8::internal::SmiIndex;
+using v8::internal::Label;
+using v8::internal::RelocInfo;
+using v8::internal::rax;
+using v8::internal::rbx;
+using v8::internal::rsi;
+using v8::internal::rdi;
+using v8::internal::rcx;
+using v8::internal::rdx;
+using v8::internal::rbp;
+using v8::internal::rsp;
+using v8::internal::r8;
+using v8::internal::r9;
+using v8::internal::r11;
+using v8::internal::r12;
+using v8::internal::r13;
+using v8::internal::r14;
+using v8::internal::r15;
+using v8::internal::FUNCTION_CAST;
+using v8::internal::CodeDesc;
+using v8::internal::less_equal;
+using v8::internal::not_equal;
+using v8::internal::not_zero;
+using v8::internal::greater;
+using v8::internal::greater_equal;
+using v8::internal::carry;
+using v8::internal::not_carry;
+using v8::internal::negative;
+using v8::internal::positive;
+using v8::internal::Smi;
+using v8::internal::kSmiTagMask;
+using v8::internal::kSmiValueSize;
+
+// Test the x64 assembler by compiling some simple functions into
+// a buffer and executing them. These tests do not initialize the
+// V8 library, create a context, or use any V8 objects.
+// The AMD64 calling convention is used, with the first six arguments
+// in RDI, RSI, RDX, RCX, R8, and R9, and floating point arguments in
+// the XMM registers. The return value is in RAX.
+// This calling convention is used on Linux, with GCC, and on Mac OS,
+// with GCC. A different convention is used on 64-bit windows.
+
+typedef int (*F0)();
+
+#define __ masm->
+
+TEST(Smi) {
+ // Check that C++ Smi operations work as expected.
+ intptr_t test_numbers[] = {
+ 0, 1, -1, 127, 128, -128, -129, 255, 256, -256, -257,
+ Smi::kMaxValue, static_cast<intptr_t>(Smi::kMaxValue) + 1,
+ Smi::kMinValue, static_cast<intptr_t>(Smi::kMinValue) - 1
+ };
+ int test_number_count = 15;
+ for (int i = 0; i < test_number_count; i++) {
+ intptr_t number = test_numbers[i];
+ bool is_valid = Smi::IsValid(number);
+ bool is_in_range = number >= Smi::kMinValue && number <= Smi::kMaxValue;
+ CHECK_EQ(is_in_range, is_valid);
+ if (is_valid) {
+ Smi* smi_from_intptr = Smi::FromIntptr(number);
+ if (static_cast<int>(number) == number) { // Is a 32-bit int.
+ Smi* smi_from_int = Smi::FromInt(static_cast<int32_t>(number));
+ CHECK_EQ(smi_from_int, smi_from_intptr);
+ }
+ int smi_value = smi_from_intptr->value();
+ CHECK_EQ(number, smi_value);
+ }
+ }
+}
+
+
+static void TestMoveSmi(MacroAssembler* masm, Label* exit, int id, Smi* value) {
+ __ movl(rax, Immediate(id));
+ __ Move(rcx, value);
+ __ Set(rdx, reinterpret_cast<intptr_t>(value));
+ __ cmpq(rcx, rdx);
+ __ j(not_equal, exit);
+}
+
+
+// Test that we can move a Smi value literally into a register.
+TEST(SmiMove) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles;
+ MacroAssembler assembler(buffer, actual_size);
+ MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
+ masm->set_allow_stub_calls(false);
+ Label exit;
+
+ TestMoveSmi(masm, &exit, 1, Smi::FromInt(0));
+ TestMoveSmi(masm, &exit, 2, Smi::FromInt(127));
+ TestMoveSmi(masm, &exit, 3, Smi::FromInt(128));
+ TestMoveSmi(masm, &exit, 4, Smi::FromInt(255));
+ TestMoveSmi(masm, &exit, 5, Smi::FromInt(256));
+ TestMoveSmi(masm, &exit, 6, Smi::FromInt(Smi::kMaxValue));
+ TestMoveSmi(masm, &exit, 7, Smi::FromInt(-1));
+ TestMoveSmi(masm, &exit, 8, Smi::FromInt(-128));
+ TestMoveSmi(masm, &exit, 9, Smi::FromInt(-129));
+ TestMoveSmi(masm, &exit, 10, Smi::FromInt(-256));
+ TestMoveSmi(masm, &exit, 11, Smi::FromInt(-257));
+ TestMoveSmi(masm, &exit, 12, Smi::FromInt(Smi::kMinValue));
+
+ __ xor_(rax, rax); // Success.
+ __ bind(&exit);
+ __ ret(0);
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F0>(buffer)();
+ CHECK_EQ(0, result);
+}
+
+
+void TestSmiCompare(MacroAssembler* masm, Label* exit, int id, int x, int y) {
+ __ Move(rcx, Smi::FromInt(x));
+ __ movq(r8, rcx);
+ __ Move(rdx, Smi::FromInt(y));
+ __ movq(r9, rdx);
+ __ SmiCompare(rcx, rdx);
+ if (x < y) {
+ __ movl(rax, Immediate(id + 1));
+ __ j(greater_equal, exit);
+ } else if (x > y) {
+ __ movl(rax, Immediate(id + 2));
+ __ j(less_equal, exit);
+ } else {
+ ASSERT_EQ(x, y);
+ __ movl(rax, Immediate(id + 3));
+ __ j(not_equal, exit);
+ }
+ __ movl(rax, Immediate(id + 4));
+ __ cmpq(rcx, r8);
+ __ j(not_equal, exit);
+ __ incq(rax);
+ __ cmpq(rdx, r9);
+ __ j(not_equal, exit);
+
+ if (x != y) {
+ __ SmiCompare(rdx, rcx);
+ if (y < x) {
+ __ movl(rax, Immediate(id + 9));
+ __ j(greater_equal, exit);
+ } else {
+ ASSERT(y > x);
+ __ movl(rax, Immediate(id + 10));
+ __ j(less_equal, exit);
+ }
+ } else {
+ __ SmiCompare(rcx, rcx);
+ __ movl(rax, Immediate(id + 11));
+ __ j(not_equal, exit);
+ __ incq(rax);
+ __ cmpq(rcx, r8);
+ __ j(not_equal, exit);
+ }
+}
+
+
+// Test that we can compare smis for equality (and more).
+TEST(SmiCompare) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles;
+ MacroAssembler assembler(buffer, actual_size);
+
+ MacroAssembler* masm = &assembler;
+ masm->set_allow_stub_calls(false);
+ Label exit;
+
+ TestSmiCompare(masm, &exit, 0x10, 0, 0);
+ TestSmiCompare(masm, &exit, 0x20, 0, 1);
+ TestSmiCompare(masm, &exit, 0x30, 1, 0);
+ TestSmiCompare(masm, &exit, 0x40, 1, 1);
+ TestSmiCompare(masm, &exit, 0x50, 0, -1);
+ TestSmiCompare(masm, &exit, 0x60, -1, 0);
+ TestSmiCompare(masm, &exit, 0x70, -1, -1);
+ TestSmiCompare(masm, &exit, 0x80, 0, Smi::kMinValue);
+ TestSmiCompare(masm, &exit, 0x90, Smi::kMinValue, 0);
+ TestSmiCompare(masm, &exit, 0xA0, 0, Smi::kMaxValue);
+ TestSmiCompare(masm, &exit, 0xB0, Smi::kMaxValue, 0);
+ TestSmiCompare(masm, &exit, 0xC0, -1, Smi::kMinValue);
+ TestSmiCompare(masm, &exit, 0xD0, Smi::kMinValue, -1);
+ TestSmiCompare(masm, &exit, 0xE0, -1, Smi::kMaxValue);
+ TestSmiCompare(masm, &exit, 0xF0, Smi::kMaxValue, -1);
+ TestSmiCompare(masm, &exit, 0x100, Smi::kMinValue, Smi::kMinValue);
+ TestSmiCompare(masm, &exit, 0x110, Smi::kMinValue, Smi::kMaxValue);
+ TestSmiCompare(masm, &exit, 0x120, Smi::kMaxValue, Smi::kMinValue);
+ TestSmiCompare(masm, &exit, 0x130, Smi::kMaxValue, Smi::kMaxValue);
+
+ __ xor_(rax, rax); // Success.
+ __ bind(&exit);
+ __ ret(0);
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F0>(buffer)();
+ CHECK_EQ(0, result);
+}
+
+
+
+TEST(Integer32ToSmi) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles;
+ MacroAssembler assembler(buffer, actual_size);
+
+ MacroAssembler* masm = &assembler;
+ masm->set_allow_stub_calls(false);
+ Label exit;
+
+ __ movq(rax, Immediate(1)); // Test number.
+ __ movl(rcx, Immediate(0));
+ __ Integer32ToSmi(rcx, rcx);
+ __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(0)));
+ __ SmiCompare(rcx, rdx);
+ __ j(not_equal, &exit);
+
+ __ movq(rax, Immediate(2)); // Test number.
+ __ movl(rcx, Immediate(1024));
+ __ Integer32ToSmi(rcx, rcx);
+ __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(1024)));
+ __ SmiCompare(rcx, rdx);
+ __ j(not_equal, &exit);
+
+ __ movq(rax, Immediate(3)); // Test number.
+ __ movl(rcx, Immediate(-1));
+ __ Integer32ToSmi(rcx, rcx);
+ __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(-1)));
+ __ SmiCompare(rcx, rdx);
+ __ j(not_equal, &exit);
+
+ __ movq(rax, Immediate(4)); // Test number.
+ __ movl(rcx, Immediate(Smi::kMaxValue));
+ __ Integer32ToSmi(rcx, rcx);
+ __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(Smi::kMaxValue)));
+ __ SmiCompare(rcx, rdx);
+ __ j(not_equal, &exit);
+
+ __ movq(rax, Immediate(5)); // Test number.
+ __ movl(rcx, Immediate(Smi::kMinValue));
+ __ Integer32ToSmi(rcx, rcx);
+ __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(Smi::kMinValue)));
+ __ SmiCompare(rcx, rdx);
+ __ j(not_equal, &exit);
+
+ // Different target register.
+
+ __ movq(rax, Immediate(6)); // Test number.
+ __ movl(rcx, Immediate(0));
+ __ Integer32ToSmi(r8, rcx);
+ __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(0)));
+ __ SmiCompare(r8, rdx);
+ __ j(not_equal, &exit);
+
+ __ movq(rax, Immediate(7)); // Test number.
+ __ movl(rcx, Immediate(1024));
+ __ Integer32ToSmi(r8, rcx);
+ __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(1024)));
+ __ SmiCompare(r8, rdx);
+ __ j(not_equal, &exit);
+
+ __ movq(rax, Immediate(8)); // Test number.
+ __ movl(rcx, Immediate(-1));
+ __ Integer32ToSmi(r8, rcx);
+ __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(-1)));
+ __ SmiCompare(r8, rdx);
+ __ j(not_equal, &exit);
+
+ __ movq(rax, Immediate(9)); // Test number.
+ __ movl(rcx, Immediate(Smi::kMaxValue));
+ __ Integer32ToSmi(r8, rcx);
+ __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(Smi::kMaxValue)));
+ __ SmiCompare(r8, rdx);
+ __ j(not_equal, &exit);
+
+ __ movq(rax, Immediate(10)); // Test number.
+ __ movl(rcx, Immediate(Smi::kMinValue));
+ __ Integer32ToSmi(r8, rcx);
+ __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(Smi::kMinValue)));
+ __ SmiCompare(r8, rdx);
+ __ j(not_equal, &exit);
+
+
+ __ xor_(rax, rax); // Success.
+ __ bind(&exit);
+ __ ret(0);
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F0>(buffer)();
+ CHECK_EQ(0, result);
+}
+
+
+void TestI64PlusConstantToSmi(MacroAssembler* masm,
+ Label* exit,
+ int id,
+ int64_t x,
+ int y) {
+ int64_t result = x + y;
+ ASSERT(Smi::IsValid(result));
+ __ movl(rax, Immediate(id));
+ __ Move(r8, Smi::FromInt(result));
+ __ movq(rcx, x, RelocInfo::NONE);
+ __ movq(r11, rcx);
+ __ Integer64PlusConstantToSmi(rdx, rcx, y);
+ __ SmiCompare(rdx, r8);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ SmiCompare(r11, rcx);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ Integer64PlusConstantToSmi(rcx, rcx, y);
+ __ SmiCompare(rcx, r8);
+ __ j(not_equal, exit);
+}
+
+
+TEST(Integer64PlusConstantToSmi) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles;
+ MacroAssembler assembler(buffer, actual_size);
+
+ MacroAssembler* masm = &assembler;
+ masm->set_allow_stub_calls(false);
+ Label exit;
+
+ int64_t twice_max = static_cast<int64_t>(Smi::kMaxValue) * 2;
+
+ TestI64PlusConstantToSmi(masm, &exit, 0x10, 0, 0);
+ TestI64PlusConstantToSmi(masm, &exit, 0x20, 0, 1);
+ TestI64PlusConstantToSmi(masm, &exit, 0x30, 1, 0);
+ TestI64PlusConstantToSmi(masm, &exit, 0x40, Smi::kMaxValue - 5, 5);
+ TestI64PlusConstantToSmi(masm, &exit, 0x50, Smi::kMinValue + 5, 5);
+ TestI64PlusConstantToSmi(masm, &exit, 0x60, twice_max, -Smi::kMaxValue);
+ TestI64PlusConstantToSmi(masm, &exit, 0x70, -twice_max, Smi::kMaxValue);
+ TestI64PlusConstantToSmi(masm, &exit, 0x80, 0, Smi::kMinValue);
+ TestI64PlusConstantToSmi(masm, &exit, 0x90, 0, Smi::kMaxValue);
+ TestI64PlusConstantToSmi(masm, &exit, 0xA0, Smi::kMinValue, 0);
+ TestI64PlusConstantToSmi(masm, &exit, 0xB0, Smi::kMaxValue, 0);
+ TestI64PlusConstantToSmi(masm, &exit, 0xC0, twice_max, Smi::kMinValue);
+
+ __ xor_(rax, rax); // Success.
+ __ bind(&exit);
+ __ ret(0);
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F0>(buffer)();
+ CHECK_EQ(0, result);
+}
+
+
+TEST(SmiCheck) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles;
+ MacroAssembler assembler(buffer, actual_size);
+
+ MacroAssembler* masm = &assembler;
+ masm->set_allow_stub_calls(false);
+ Label exit;
+ Condition cond;
+
+ __ movl(rax, Immediate(1)); // Test number.
+
+ // CheckSmi
+
+ __ movl(rcx, Immediate(0));
+ __ Integer32ToSmi(rcx, rcx);
+ cond = masm->CheckSmi(rcx);
+ __ j(NegateCondition(cond), &exit);
+
+ __ incq(rax);
+ __ xor_(rcx, Immediate(kSmiTagMask));
+ cond = masm->CheckSmi(rcx);
+ __ j(cond, &exit);
+
+ __ incq(rax);
+ __ movl(rcx, Immediate(-1));
+ __ Integer32ToSmi(rcx, rcx);
+ cond = masm->CheckSmi(rcx);
+ __ j(NegateCondition(cond), &exit);
+
+ __ incq(rax);
+ __ xor_(rcx, Immediate(kSmiTagMask));
+ cond = masm->CheckSmi(rcx);
+ __ j(cond, &exit);
+
+ __ incq(rax);
+ __ movl(rcx, Immediate(Smi::kMaxValue));
+ __ Integer32ToSmi(rcx, rcx);
+ cond = masm->CheckSmi(rcx);
+ __ j(NegateCondition(cond), &exit);
+
+ __ incq(rax);
+ __ xor_(rcx, Immediate(kSmiTagMask));
+ cond = masm->CheckSmi(rcx);
+ __ j(cond, &exit);
+
+ __ incq(rax);
+ __ movl(rcx, Immediate(Smi::kMinValue));
+ __ Integer32ToSmi(rcx, rcx);
+ cond = masm->CheckSmi(rcx);
+ __ j(NegateCondition(cond), &exit);
+
+ __ incq(rax);
+ __ xor_(rcx, Immediate(kSmiTagMask));
+ cond = masm->CheckSmi(rcx);
+ __ j(cond, &exit);
+
+ // CheckPositiveSmi
+
+ __ incq(rax);
+ __ movl(rcx, Immediate(0));
+ __ Integer32ToSmi(rcx, rcx);
+ cond = masm->CheckPositiveSmi(rcx); // Zero counts as positive.
+ __ j(NegateCondition(cond), &exit);
+
+ __ incq(rax);
+ __ xor_(rcx, Immediate(kSmiTagMask));
+ cond = masm->CheckPositiveSmi(rcx); // "zero" non-smi.
+ __ j(cond, &exit);
+
+ __ incq(rax);
+ __ movq(rcx, Immediate(-1));
+ __ Integer32ToSmi(rcx, rcx);
+ cond = masm->CheckPositiveSmi(rcx); // Negative smis are not positive.
+ __ j(cond, &exit);
+
+ __ incq(rax);
+ __ movq(rcx, Immediate(Smi::kMinValue));
+ __ Integer32ToSmi(rcx, rcx);
+ cond = masm->CheckPositiveSmi(rcx); // Most negative smi is not positive.
+ __ j(cond, &exit);
+
+ __ incq(rax);
+ __ xor_(rcx, Immediate(kSmiTagMask));
+ cond = masm->CheckPositiveSmi(rcx); // "Negative" non-smi.
+ __ j(cond, &exit);
+
+ __ incq(rax);
+ __ movq(rcx, Immediate(Smi::kMaxValue));
+ __ Integer32ToSmi(rcx, rcx);
+ cond = masm->CheckPositiveSmi(rcx); // Most positive smi is positive.
+ __ j(NegateCondition(cond), &exit);
+
+ __ incq(rax);
+ __ xor_(rcx, Immediate(kSmiTagMask));
+ cond = masm->CheckPositiveSmi(rcx); // "Positive" non-smi.
+ __ j(cond, &exit);
+
+ // CheckIsMinSmi
+
+ __ incq(rax);
+ __ movq(rcx, Immediate(Smi::kMaxValue));
+ __ Integer32ToSmi(rcx, rcx);
+ cond = masm->CheckIsMinSmi(rcx);
+ __ j(cond, &exit);
+
+ __ incq(rax);
+ __ movq(rcx, Immediate(0));
+ __ Integer32ToSmi(rcx, rcx);
+ cond = masm->CheckIsMinSmi(rcx);
+ __ j(cond, &exit);
+
+ __ incq(rax);
+ __ movq(rcx, Immediate(Smi::kMinValue));
+ __ Integer32ToSmi(rcx, rcx);
+ cond = masm->CheckIsMinSmi(rcx);
+ __ j(NegateCondition(cond), &exit);
+
+ __ incq(rax);
+ __ movq(rcx, Immediate(Smi::kMinValue + 1));
+ __ Integer32ToSmi(rcx, rcx);
+ cond = masm->CheckIsMinSmi(rcx);
+ __ j(cond, &exit);
+
+ // CheckBothSmi
+
+ __ incq(rax);
+ __ movq(rcx, Immediate(Smi::kMaxValue));
+ __ Integer32ToSmi(rcx, rcx);
+ __ movq(rdx, Immediate(Smi::kMinValue));
+ __ Integer32ToSmi(rdx, rdx);
+ cond = masm->CheckBothSmi(rcx, rdx);
+ __ j(NegateCondition(cond), &exit);
+
+ __ incq(rax);
+ __ xor_(rcx, Immediate(kSmiTagMask));
+ cond = masm->CheckBothSmi(rcx, rdx);
+ __ j(cond, &exit);
+
+ __ incq(rax);
+ __ xor_(rdx, Immediate(kSmiTagMask));
+ cond = masm->CheckBothSmi(rcx, rdx);
+ __ j(cond, &exit);
+
+ __ incq(rax);
+ __ xor_(rcx, Immediate(kSmiTagMask));
+ cond = masm->CheckBothSmi(rcx, rdx);
+ __ j(cond, &exit);
+
+ __ incq(rax);
+ cond = masm->CheckBothSmi(rcx, rcx);
+ __ j(NegateCondition(cond), &exit);
+
+ __ incq(rax);
+ cond = masm->CheckBothSmi(rdx, rdx);
+ __ j(cond, &exit);
+
+ // CheckInteger32ValidSmiValue
+ __ incq(rax);
+ __ movq(rcx, Immediate(0));
+ cond = masm->CheckInteger32ValidSmiValue(rcx);
+ __ j(NegateCondition(cond), &exit);
+
+ __ incq(rax);
+ __ movq(rcx, Immediate(-1));
+ cond = masm->CheckInteger32ValidSmiValue(rcx);
+ __ j(NegateCondition(cond), &exit);
+
+ __ incq(rax);
+ __ movq(rcx, Immediate(Smi::kMaxValue));
+ cond = masm->CheckInteger32ValidSmiValue(rcx);
+ __ j(NegateCondition(cond), &exit);
+
+ __ incq(rax);
+ __ movq(rcx, Immediate(Smi::kMinValue));
+ cond = masm->CheckInteger32ValidSmiValue(rcx);
+ __ j(NegateCondition(cond), &exit);
+
+ // Success
+ __ xor_(rax, rax);
+
+ __ bind(&exit);
+ __ ret(0);
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F0>(buffer)();
+ CHECK_EQ(0, result);
+}
+
+
+
+void TestSmiNeg(MacroAssembler* masm, Label* exit, int id, int x) {
+ __ Move(rcx, Smi::FromInt(x));
+ __ movq(r11, rcx);
+ if (x == Smi::kMinValue || x == 0) {
+ // Negation fails.
+ __ movl(rax, Immediate(id + 8));
+ __ SmiNeg(r9, rcx, exit);
+
+ __ incq(rax);
+ __ SmiCompare(r11, rcx);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ SmiNeg(rcx, rcx, exit);
+
+ __ incq(rax);
+ __ SmiCompare(r11, rcx);
+ __ j(not_equal, exit);
+ } else {
+ Label smi_ok, smi_ok2;
+ int result = -x;
+ __ movl(rax, Immediate(id));
+ __ Move(r8, Smi::FromInt(result));
+
+ __ SmiNeg(r9, rcx, &smi_ok);
+ __ jmp(exit);
+ __ bind(&smi_ok);
+ __ incq(rax);
+ __ SmiCompare(r9, r8);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ SmiCompare(r11, rcx);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ SmiNeg(rcx, rcx, &smi_ok2);
+ __ jmp(exit);
+ __ bind(&smi_ok2);
+ __ incq(rax);
+ __ SmiCompare(rcx, r8);
+ __ j(not_equal, exit);
+ }
+}
+
+
+TEST(SmiNeg) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles;
+ MacroAssembler assembler(buffer, actual_size);
+
+ MacroAssembler* masm = &assembler;
+ masm->set_allow_stub_calls(false);
+ Label exit;
+
+ TestSmiNeg(masm, &exit, 0x10, 0);
+ TestSmiNeg(masm, &exit, 0x20, 1);
+ TestSmiNeg(masm, &exit, 0x30, -1);
+ TestSmiNeg(masm, &exit, 0x40, 127);
+ TestSmiNeg(masm, &exit, 0x50, 65535);
+ TestSmiNeg(masm, &exit, 0x60, Smi::kMinValue);
+ TestSmiNeg(masm, &exit, 0x70, Smi::kMaxValue);
+ TestSmiNeg(masm, &exit, 0x80, -Smi::kMaxValue);
+
+ __ xor_(rax, rax); // Success.
+ __ bind(&exit);
+ __ ret(0);
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F0>(buffer)();
+ CHECK_EQ(0, result);
+}
+
+
+
+
+static void SmiAddTest(MacroAssembler* masm,
+ Label* exit,
+ int id,
+ int first,
+ int second) {
+ __ movl(rcx, Immediate(first));
+ __ Integer32ToSmi(rcx, rcx);
+ __ movl(rdx, Immediate(second));
+ __ Integer32ToSmi(rdx, rdx);
+ __ movl(r8, Immediate(first + second));
+ __ Integer32ToSmi(r8, r8);
+
+ __ movl(rax, Immediate(id)); // Test number.
+ __ SmiAdd(r9, rcx, rdx, exit);
+ __ SmiCompare(r9, r8);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ SmiAdd(rcx, rcx, rdx, exit);
+ __ SmiCompare(rcx, r8);
+ __ j(not_equal, exit);
+
+ __ movl(rcx, Immediate(first));
+ __ Integer32ToSmi(rcx, rcx);
+
+ __ incq(rax);
+ __ SmiAddConstant(r9, rcx, Smi::FromInt(second));
+ __ SmiCompare(r9, r8);
+ __ j(not_equal, exit);
+
+ __ SmiAddConstant(rcx, rcx, Smi::FromInt(second));
+ __ SmiCompare(rcx, r8);
+ __ j(not_equal, exit);
+
+ __ movl(rcx, Immediate(first));
+ __ Integer32ToSmi(rcx, rcx);
+
+ __ incq(rax);
+ __ SmiAddConstant(r9, rcx, Smi::FromInt(second), exit);
+ __ SmiCompare(r9, r8);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ SmiAddConstant(rcx, rcx, Smi::FromInt(second), exit);
+ __ SmiCompare(rcx, r8);
+ __ j(not_equal, exit);
+}
+
+TEST(SmiAdd) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles;
+ MacroAssembler assembler(buffer, actual_size);
+
+ MacroAssembler* masm = &assembler;
+ masm->set_allow_stub_calls(false);
+ Label exit;
+
+ // No-overflow tests.
+ SmiAddTest(masm, &exit, 0x10, 1, 2);
+ SmiAddTest(masm, &exit, 0x20, 1, -2);
+ SmiAddTest(masm, &exit, 0x30, -1, 2);
+ SmiAddTest(masm, &exit, 0x40, -1, -2);
+ SmiAddTest(masm, &exit, 0x50, 0x1000, 0x2000);
+ SmiAddTest(masm, &exit, 0x60, Smi::kMinValue, 5);
+ SmiAddTest(masm, &exit, 0x70, Smi::kMaxValue, -5);
+ SmiAddTest(masm, &exit, 0x80, Smi::kMaxValue, Smi::kMinValue);
+
+ __ xor_(rax, rax); // Success.
+ __ bind(&exit);
+ __ ret(0);
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F0>(buffer)();
+ CHECK_EQ(0, result);
+}
+
+
+static void SmiSubTest(MacroAssembler* masm,
+ Label* exit,
+ int id,
+ int first,
+ int second) {
+ __ Move(rcx, Smi::FromInt(first));
+ __ Move(rdx, Smi::FromInt(second));
+ __ Move(r8, Smi::FromInt(first - second));
+
+ __ movl(rax, Immediate(id)); // Test 0.
+ __ SmiSub(r9, rcx, rdx, exit);
+ __ SmiCompare(r9, r8);
+ __ j(not_equal, exit);
+
+ __ incq(rax); // Test 1.
+ __ SmiSub(rcx, rcx, rdx, exit);
+ __ SmiCompare(rcx, r8);
+ __ j(not_equal, exit);
+
+ __ Move(rcx, Smi::FromInt(first));
+
+ __ incq(rax); // Test 2.
+ __ SmiSubConstant(r9, rcx, Smi::FromInt(second));
+ __ SmiCompare(r9, r8);
+ __ j(not_equal, exit);
+
+ __ incq(rax); // Test 3.
+ __ SmiSubConstant(rcx, rcx, Smi::FromInt(second));
+ __ SmiCompare(rcx, r8);
+ __ j(not_equal, exit);
+
+ __ Move(rcx, Smi::FromInt(first));
+
+ __ incq(rax); // Test 4.
+ __ SmiSubConstant(r9, rcx, Smi::FromInt(second), exit);
+ __ SmiCompare(r9, r8);
+ __ j(not_equal, exit);
+
+ __ incq(rax); // Test 5.
+ __ SmiSubConstant(rcx, rcx, Smi::FromInt(second), exit);
+ __ SmiCompare(rcx, r8);
+ __ j(not_equal, exit);
+}
+
+// Checks that SmiSub/SmiSubConstant branch to the overflow label instead of
+// producing an out-of-range result, and that a source register distinct from
+// the destination is preserved on the overflow path.  rax = id + step count
+// names the failing sub-case when control reaches 'exit'.
+static void SmiSubOverflowTest(MacroAssembler* masm,
+ Label* exit,
+ int id,
+ int x) {
+ // Subtracts a Smi from x so that the subtraction overflows.
+ ASSERT(x != -1); // Can't overflow by subtracting a Smi.
+ // Two extreme right-hand sides (one per direction) for which x - y leaves
+ // the Smi range.
+ int y_max = (x < 0) ? (Smi::kMaxValue + 0) : (Smi::kMinValue + 0);
+ int y_min = (x < 0) ? (Smi::kMaxValue + x + 2) : (Smi::kMinValue + x);
+
+ __ movl(rax, Immediate(id));
+ __ Move(rcx, Smi::FromInt(x));
+ __ movq(r11, rcx); // Store original Smi value of x in r11.
+ __ Move(rdx, Smi::FromInt(y_min));
+ {
+ Label overflow_ok;
+ __ SmiSub(r9, rcx, rdx, &overflow_ok);
+ __ jmp(exit);
+ __ bind(&overflow_ok);
+ __ incq(rax);
+ __ SmiCompare(rcx, r11);
+ __ j(not_equal, exit);
+ }
+
+ {
+ Label overflow_ok;
+ __ incq(rax);
+ __ SmiSub(rcx, rcx, rdx, &overflow_ok);
+ __ jmp(exit);
+ __ bind(&overflow_ok);
+ __ incq(rax);
+ __ SmiCompare(rcx, r11);
+ __ j(not_equal, exit);
+ }
+
+ __ movq(rcx, r11);
+ {
+ Label overflow_ok;
+ __ incq(rax);
+ __ SmiSubConstant(r9, rcx, Smi::FromInt(y_min), &overflow_ok);
+ __ jmp(exit);
+ __ bind(&overflow_ok);
+ __ incq(rax);
+ __ SmiCompare(rcx, r11);
+ __ j(not_equal, exit);
+ }
+
+ {
+ Label overflow_ok;
+ __ incq(rax);
+ __ SmiSubConstant(rcx, rcx, Smi::FromInt(y_min), &overflow_ok);
+ __ jmp(exit);
+ __ bind(&overflow_ok);
+ __ incq(rax);
+ __ SmiCompare(rcx, r11);
+ __ j(not_equal, exit);
+ }
+
+ __ Move(rdx, Smi::FromInt(y_max));
+
+ // Same four checks with the other extreme operand.
+ {
+ Label overflow_ok;
+ __ incq(rax);
+ __ SmiSub(r9, rcx, rdx, &overflow_ok);
+ __ jmp(exit);
+ __ bind(&overflow_ok);
+ __ incq(rax);
+ __ SmiCompare(rcx, r11);
+ __ j(not_equal, exit);
+ }
+
+ {
+ Label overflow_ok;
+ __ incq(rax);
+ __ SmiSub(rcx, rcx, rdx, &overflow_ok);
+ __ jmp(exit);
+ __ bind(&overflow_ok);
+ __ incq(rax);
+ __ SmiCompare(rcx, r11);
+ __ j(not_equal, exit);
+ }
+
+ __ movq(rcx, r11);
+ {
+ Label overflow_ok;
+ __ incq(rax);
+ __ SmiSubConstant(r9, rcx, Smi::FromInt(y_max), &overflow_ok);
+ __ jmp(exit);
+ __ bind(&overflow_ok);
+ __ incq(rax);
+ __ SmiCompare(rcx, r11);
+ __ j(not_equal, exit);
+ }
+
+ {
+ Label overflow_ok;
+ __ incq(rax);
+ __ SmiSubConstant(rcx, rcx, Smi::FromInt(y_max), &overflow_ok);
+ __ jmp(exit);
+ __ bind(&overflow_ok);
+ __ incq(rax);
+ __ SmiCompare(rcx, r11);
+ __ j(not_equal, exit);
+ }
+}
+
+
+// Assembles all SmiSub/SmiSubConstant cases (including the overflow cases)
+// into freshly allocated executable memory and runs them; the generated
+// function returns 0 on success, or the rax id of the failing check.
+// NOTE(review): 'buffer' is never freed — acceptable in a test binary, but
+// worth confirming against the harness conventions.
+TEST(SmiSub) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles;
+ MacroAssembler assembler(buffer, actual_size);
+
+ MacroAssembler* masm = &assembler;
+ masm->set_allow_stub_calls(false);
+ Label exit;
+
+ SmiSubTest(masm, &exit, 0x10, 1, 2);
+ SmiSubTest(masm, &exit, 0x20, 1, -2);
+ SmiSubTest(masm, &exit, 0x30, -1, 2);
+ SmiSubTest(masm, &exit, 0x40, -1, -2);
+ SmiSubTest(masm, &exit, 0x50, 0x1000, 0x2000);
+ SmiSubTest(masm, &exit, 0x60, Smi::kMinValue, -5);
+ SmiSubTest(masm, &exit, 0x70, Smi::kMaxValue, 5);
+ SmiSubTest(masm, &exit, 0x80, -Smi::kMaxValue, Smi::kMinValue);
+ SmiSubTest(masm, &exit, 0x90, 0, Smi::kMaxValue);
+
+ SmiSubOverflowTest(masm, &exit, 0xA0, 1);
+ SmiSubOverflowTest(masm, &exit, 0xB0, 1024);
+ SmiSubOverflowTest(masm, &exit, 0xC0, Smi::kMaxValue);
+ SmiSubOverflowTest(masm, &exit, 0xD0, -2);
+ SmiSubOverflowTest(masm, &exit, 0xE0, -42000);
+ SmiSubOverflowTest(masm, &exit, 0xF0, Smi::kMinValue);
+ SmiSubOverflowTest(masm, &exit, 0x100, 0);
+
+ __ xor_(rax, rax); // Success.
+ __ bind(&exit);
+ __ ret(0);
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F0>(buffer)();
+ CHECK_EQ(0, result);
+}
+
+
+
+// Exercises SmiMul for x*y.  If the product fits in a Smi (and is not the
+// unrepresentable negative zero), checks the result and that a source
+// register distinct from the destination survives; otherwise checks that
+// SmiMul takes the bailout label instead of falling through.
+void TestSmiMul(MacroAssembler* masm, Label* exit, int id, int x, int y) {
+ int64_t result = static_cast<int64_t>(x) * static_cast<int64_t>(y);
+ // 0 with a negative factor would have to be -0, which is not a Smi.
+ bool negative_zero = (result == 0) && (x < 0 || y < 0);
+ __ Move(rcx, Smi::FromInt(x));
+ __ movq(r11, rcx);
+ __ Move(rdx, Smi::FromInt(y));
+ if (Smi::IsValid(result) && !negative_zero) {
+ __ movl(rax, Immediate(id));
+ __ Move(r8, Smi::FromIntptr(result));
+ __ SmiMul(r9, rcx, rdx, exit);
+ __ incq(rax);
+ __ SmiCompare(r11, rcx);
+ __ j(not_equal, exit);
+ __ incq(rax);
+ __ SmiCompare(r9, r8);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ SmiMul(rcx, rcx, rdx, exit);
+ __ SmiCompare(rcx, r8);
+ __ j(not_equal, exit);
+ } else {
+ __ movl(rax, Immediate(id + 8));
+ Label overflow_ok, overflow_ok2;
+ __ SmiMul(r9, rcx, rdx, &overflow_ok);
+ __ jmp(exit);
+ __ bind(&overflow_ok);
+ __ incq(rax);
+ __ SmiCompare(r11, rcx);
+ __ j(not_equal, exit);
+ __ incq(rax);
+ __ SmiMul(rcx, rcx, rdx, &overflow_ok2);
+ __ jmp(exit);
+ __ bind(&overflow_ok2);
+ // 31-bit version doesn't preserve rcx on failure.
+ // __ incq(rax);
+ // __ SmiCompare(r11, rcx);
+ // __ j(not_equal, exit);
+ }
+}
+
+
+// Runs the assembled SmiMul cases; 0 means success, otherwise the result is
+// the rax id of the failing check.
+TEST(SmiMul) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles;
+ MacroAssembler assembler(buffer, actual_size);
+
+ MacroAssembler* masm = &assembler;
+ masm->set_allow_stub_calls(false);
+ Label exit;
+
+ TestSmiMul(masm, &exit, 0x10, 0, 0);
+ TestSmiMul(masm, &exit, 0x20, -1, 0);
+ TestSmiMul(masm, &exit, 0x30, 0, -1);
+ TestSmiMul(masm, &exit, 0x40, -1, -1);
+ TestSmiMul(masm, &exit, 0x50, 0x10000, 0x10000);
+ TestSmiMul(masm, &exit, 0x60, 0x10000, 0xffff);
+ // NOTE(review): identical to the 0x60 case above — possibly the operands
+ // were meant to be swapped here; confirm intent.
+ TestSmiMul(masm, &exit, 0x70, 0x10000, 0xffff);
+ TestSmiMul(masm, &exit, 0x80, Smi::kMaxValue, -1);
+ TestSmiMul(masm, &exit, 0x90, Smi::kMaxValue, -2);
+ TestSmiMul(masm, &exit, 0xa0, Smi::kMaxValue, 2);
+ TestSmiMul(masm, &exit, 0xb0, (Smi::kMaxValue / 2), 2);
+ TestSmiMul(masm, &exit, 0xc0, (Smi::kMaxValue / 2) + 1, 2);
+ TestSmiMul(masm, &exit, 0xd0, (Smi::kMinValue / 2), 2);
+ TestSmiMul(masm, &exit, 0xe0, (Smi::kMinValue / 2) - 1, 2);
+
+ __ xor_(rax, rax); // Success.
+ __ bind(&exit);
+ __ ret(0);
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F0>(buffer)();
+ CHECK_EQ(0, result);
+}
+
+
+// Exercises SmiDiv for x/y.  Division must bail out (take the label) on a
+// non-exact quotient, overflow, negative zero, or division by zero, and must
+// leave the dividend register intact on the bailout path.  Uses r15 (not
+// rax) as the step counter because SmiDiv itself needs rax/rdx for idiv.
+// NOTE(review): r12 and r15 are callee-saved in both x64 ABIs and are
+// clobbered here without being saved — confirm the C++ caller tolerates it.
+void TestSmiDiv(MacroAssembler* masm, Label* exit, int id, int x, int y) {
+ bool division_by_zero = (y == 0);
+ bool negative_zero = (x == 0 && y < 0);
+#ifdef V8_LONG_SMI
+ bool overflow = (x == Smi::kMinValue && y < 0); // Safe approx. used.
+#else
+ bool overflow = (x == Smi::kMinValue && y == -1);
+#endif
+ bool fraction = !division_by_zero && !overflow && (x % y != 0);
+ __ Move(r11, Smi::FromInt(x));
+ __ Move(r12, Smi::FromInt(y));
+ if (!fraction && !overflow && !negative_zero && !division_by_zero) {
+ // Division succeeds
+ __ movq(rcx, r11);
+ __ movq(r15, Immediate(id));
+ int result = x / y;
+ __ Move(r8, Smi::FromInt(result));
+ __ SmiDiv(r9, rcx, r12, exit);
+ // Might have destroyed rcx and r12.
+ __ incq(r15);
+ __ SmiCompare(r9, r8);
+ __ j(not_equal, exit);
+
+ __ incq(r15);
+ __ movq(rcx, r11);
+ __ Move(r12, Smi::FromInt(y));
+ __ SmiCompare(rcx, r11);
+ __ j(not_equal, exit);
+
+ __ incq(r15);
+ __ SmiDiv(rcx, rcx, r12, exit);
+
+ __ incq(r15);
+ __ SmiCompare(rcx, r8);
+ __ j(not_equal, exit);
+ } else {
+ // Division fails.
+ __ movq(r15, Immediate(id + 8));
+
+ Label fail_ok, fail_ok2;
+ __ movq(rcx, r11);
+ __ SmiDiv(r9, rcx, r12, &fail_ok);
+ __ jmp(exit);
+ __ bind(&fail_ok);
+
+ __ incq(r15);
+ __ SmiCompare(rcx, r11);
+ __ j(not_equal, exit);
+
+ __ incq(r15);
+ __ SmiDiv(rcx, rcx, r12, &fail_ok2);
+ __ jmp(exit);
+ __ bind(&fail_ok2);
+
+ __ incq(r15);
+ __ SmiCompare(rcx, r11);
+ __ j(not_equal, exit);
+ }
+}
+
+
+// Runs the assembled SmiDiv cases.  The step counter lives in r15 and is
+// copied to rax just before returning; 0 means success.
+TEST(SmiDiv) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles;
+ MacroAssembler assembler(buffer, actual_size);
+
+ MacroAssembler* masm = &assembler;
+ masm->set_allow_stub_calls(false);
+ Label exit;
+
+ TestSmiDiv(masm, &exit, 0x10, 1, 1);
+ TestSmiDiv(masm, &exit, 0x20, 1, 0);
+ TestSmiDiv(masm, &exit, 0x30, -1, 0);
+ TestSmiDiv(masm, &exit, 0x40, 0, 1);
+ TestSmiDiv(masm, &exit, 0x50, 0, -1);
+ TestSmiDiv(masm, &exit, 0x60, 4, 2);
+ TestSmiDiv(masm, &exit, 0x70, -4, 2);
+ TestSmiDiv(masm, &exit, 0x80, 4, -2);
+ TestSmiDiv(masm, &exit, 0x90, -4, -2);
+ TestSmiDiv(masm, &exit, 0xa0, 3, 2);
+ TestSmiDiv(masm, &exit, 0xb0, 3, 4);
+ TestSmiDiv(masm, &exit, 0xc0, 1, Smi::kMaxValue);
+ TestSmiDiv(masm, &exit, 0xd0, -1, Smi::kMaxValue);
+ TestSmiDiv(masm, &exit, 0xe0, Smi::kMaxValue, 1);
+ TestSmiDiv(masm, &exit, 0xf0, Smi::kMaxValue, Smi::kMaxValue);
+ TestSmiDiv(masm, &exit, 0x100, Smi::kMaxValue, -Smi::kMaxValue);
+ TestSmiDiv(masm, &exit, 0x110, Smi::kMaxValue, -1);
+ TestSmiDiv(masm, &exit, 0x120, Smi::kMinValue, 1);
+ TestSmiDiv(masm, &exit, 0x130, Smi::kMinValue, Smi::kMinValue);
+ TestSmiDiv(masm, &exit, 0x140, Smi::kMinValue, -1);
+
+ __ xor_(r15, r15); // Success.
+ __ bind(&exit);
+ __ movq(rax, r15);
+ __ ret(0);
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F0>(buffer)();
+ CHECK_EQ(0, result);
+}
+
+
+// Exercises SmiMod for x%y.  Unlike division, a non-zero remainder is a
+// success case; SmiMod must bail out on idiv overflow (kMinValue % -1),
+// negative zero (x < 0 with remainder 0) and division by zero, leaving the
+// dividend intact.  r15 is the step counter (rax/rdx are needed by idiv).
+void TestSmiMod(MacroAssembler* masm, Label* exit, int id, int x, int y) {
+ bool division_by_zero = (y == 0);
+ bool division_overflow = (x == Smi::kMinValue) && (y == -1);
+ bool fraction = !division_by_zero && !division_overflow && ((x % y) != 0);
+ bool negative_zero = (!fraction && x < 0);
+ __ Move(rcx, Smi::FromInt(x));
+ __ movq(r11, rcx);
+ __ Move(r12, Smi::FromInt(y));
+ if (!division_overflow && !negative_zero && !division_by_zero) {
+ // Modulo succeeds
+ __ movq(r15, Immediate(id));
+ int result = x % y;
+ __ Move(r8, Smi::FromInt(result));
+ __ SmiMod(r9, rcx, r12, exit);
+
+ __ incq(r15);
+ __ SmiCompare(r9, r8);
+ __ j(not_equal, exit);
+
+ __ incq(r15);
+ __ SmiCompare(rcx, r11);
+ __ j(not_equal, exit);
+
+ __ incq(r15);
+ __ SmiMod(rcx, rcx, r12, exit);
+
+ __ incq(r15);
+ __ SmiCompare(rcx, r8);
+ __ j(not_equal, exit);
+ } else {
+ // Modulo fails.
+ __ movq(r15, Immediate(id + 8));
+
+ Label fail_ok, fail_ok2;
+ __ SmiMod(r9, rcx, r12, &fail_ok);
+ __ jmp(exit);
+ __ bind(&fail_ok);
+
+ __ incq(r15);
+ __ SmiCompare(rcx, r11);
+ __ j(not_equal, exit);
+
+ __ incq(r15);
+ __ SmiMod(rcx, rcx, r12, &fail_ok2);
+ __ jmp(exit);
+ __ bind(&fail_ok2);
+
+ __ incq(r15);
+ __ SmiCompare(rcx, r11);
+ __ j(not_equal, exit);
+ }
+}
+
+
+// Runs the assembled SmiMod cases; r15 carries the failure id and is moved
+// to rax before returning (0 on success).
+TEST(SmiMod) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles;
+ MacroAssembler assembler(buffer, actual_size);
+
+ MacroAssembler* masm = &assembler;
+ masm->set_allow_stub_calls(false);
+ Label exit;
+
+ TestSmiMod(masm, &exit, 0x10, 1, 1);
+ TestSmiMod(masm, &exit, 0x20, 1, 0);
+ TestSmiMod(masm, &exit, 0x30, -1, 0);
+ TestSmiMod(masm, &exit, 0x40, 0, 1);
+ TestSmiMod(masm, &exit, 0x50, 0, -1);
+ TestSmiMod(masm, &exit, 0x60, 4, 2);
+ TestSmiMod(masm, &exit, 0x70, -4, 2);
+ TestSmiMod(masm, &exit, 0x80, 4, -2);
+ TestSmiMod(masm, &exit, 0x90, -4, -2);
+ TestSmiMod(masm, &exit, 0xa0, 3, 2);
+ TestSmiMod(masm, &exit, 0xb0, 3, 4);
+ TestSmiMod(masm, &exit, 0xc0, 1, Smi::kMaxValue);
+ TestSmiMod(masm, &exit, 0xd0, -1, Smi::kMaxValue);
+ TestSmiMod(masm, &exit, 0xe0, Smi::kMaxValue, 1);
+ TestSmiMod(masm, &exit, 0xf0, Smi::kMaxValue, Smi::kMaxValue);
+ TestSmiMod(masm, &exit, 0x100, Smi::kMaxValue, -Smi::kMaxValue);
+ TestSmiMod(masm, &exit, 0x110, Smi::kMaxValue, -1);
+ TestSmiMod(masm, &exit, 0x120, Smi::kMinValue, 1);
+ TestSmiMod(masm, &exit, 0x130, Smi::kMinValue, Smi::kMinValue);
+ TestSmiMod(masm, &exit, 0x140, Smi::kMinValue, -1);
+
+ __ xor_(r15, r15); // Success.
+ __ bind(&exit);
+ __ movq(rax, r15);
+ __ ret(0);
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F0>(buffer)();
+ CHECK_EQ(0, result);
+}
+
+
+// Checks SmiToIndex/SmiToNegativeIndex for scale factors 0..7: applying the
+// returned (reg, scale) pair — here realized as a shift — must reproduce
+// x << i (resp. -x << i), both for a separate destination register and for
+// the in-place (rcx, rcx) form.
+void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
+ __ movl(rax, Immediate(id));
+
+ for (int i = 0; i < 8; i++) {
+ __ Move(rcx, Smi::FromInt(x));
+ SmiIndex index = masm->SmiToIndex(rdx, rcx, i);
+ ASSERT(index.reg.is(rcx) || index.reg.is(rdx));
+ __ shl(index.reg, Immediate(index.scale));
+ __ Set(r8, static_cast<intptr_t>(x) << i);
+ __ SmiCompare(index.reg, r8);
+ __ j(not_equal, exit);
+ __ incq(rax);
+ __ Move(rcx, Smi::FromInt(x));
+ index = masm->SmiToIndex(rcx, rcx, i);
+ ASSERT(index.reg.is(rcx));
+ __ shl(rcx, Immediate(index.scale));
+ __ Set(r8, static_cast<intptr_t>(x) << i);
+ __ SmiCompare(rcx, r8);
+ __ j(not_equal, exit);
+ __ incq(rax);
+
+ __ Move(rcx, Smi::FromInt(x));
+ index = masm->SmiToNegativeIndex(rdx, rcx, i);
+ ASSERT(index.reg.is(rcx) || index.reg.is(rdx));
+ __ shl(index.reg, Immediate(index.scale));
+ __ Set(r8, static_cast<intptr_t>(-x) << i);
+ __ SmiCompare(index.reg, r8);
+ __ j(not_equal, exit);
+ __ incq(rax);
+ __ Move(rcx, Smi::FromInt(x));
+ index = masm->SmiToNegativeIndex(rcx, rcx, i);
+ ASSERT(index.reg.is(rcx));
+ __ shl(rcx, Immediate(index.scale));
+ __ Set(r8, static_cast<intptr_t>(-x) << i);
+ __ SmiCompare(rcx, r8);
+ __ j(not_equal, exit);
+ __ incq(rax);
+ }
+}
+
+// Runs the assembled SmiToIndex/SmiToNegativeIndex cases; returns 0 on
+// success, otherwise the rax id of the failing check.
+TEST(SmiIndex) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles;
+ MacroAssembler assembler(buffer, actual_size);
+
+ MacroAssembler* masm = &assembler;
+ masm->set_allow_stub_calls(false);
+ Label exit;
+
+ TestSmiIndex(masm, &exit, 0x10, 0);
+ TestSmiIndex(masm, &exit, 0x20, 1);
+ TestSmiIndex(masm, &exit, 0x30, 100);
+ TestSmiIndex(masm, &exit, 0x40, 1000);
+ TestSmiIndex(masm, &exit, 0x50, Smi::kMaxValue);
+
+ __ xor_(rax, rax); // Success.
+ __ bind(&exit);
+ __ ret(0);
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F0>(buffer)();
+ CHECK_EQ(0, result);
+}
+
+
+// Checks SelectNonSmi: given one smi and one non-smi (made non-smi by
+// flipping the tag bit with kSmiTagMask), the non-smi operand must be
+// selected.  When both operands are non-smis, SelectNonSmi must take its
+// bailout label — the final case expects exactly that.
+void TestSelectNonSmi(MacroAssembler* masm, Label* exit, int id, int x, int y) {
+ __ movl(rax, Immediate(id));
+ __ Move(rcx, Smi::FromInt(x));
+ __ Move(rdx, Smi::FromInt(y));
+ __ xor_(rdx, Immediate(kSmiTagMask)); // rdx is now not a smi.
+ __ SelectNonSmi(r9, rcx, rdx, exit);
+
+ __ incq(rax);
+ __ SmiCompare(r9, rdx);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ Move(rcx, Smi::FromInt(x));
+ __ Move(rdx, Smi::FromInt(y));
+ __ xor_(rcx, Immediate(kSmiTagMask)); // rcx is now not a smi.
+ __ SelectNonSmi(r9, rcx, rdx, exit);
+
+ __ incq(rax);
+ __ SmiCompare(r9, rcx);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ Label fail_ok;
+ __ Move(rcx, Smi::FromInt(x));
+ __ Move(rdx, Smi::FromInt(y));
+ __ xor_(rcx, Immediate(kSmiTagMask));
+ __ xor_(rdx, Immediate(kSmiTagMask));
+ __ SelectNonSmi(r9, rcx, rdx, &fail_ok); // Both non-smi: must bail out.
+ __ jmp(exit);
+ __ bind(&fail_ok);
+}
+
+
+// Runs the assembled SelectNonSmi cases; returns 0 on success, otherwise the
+// rax id of the failing check.
+TEST(SmiSelectNonSmi) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles;
+ MacroAssembler assembler(buffer, actual_size);
+
+ MacroAssembler* masm = &assembler;
+ masm->set_allow_stub_calls(false); // Avoid inline checks.
+ Label exit;
+
+ TestSelectNonSmi(masm, &exit, 0x10, 0, 0);
+ TestSelectNonSmi(masm, &exit, 0x20, 0, 1);
+ TestSelectNonSmi(masm, &exit, 0x30, 1, 0);
+ TestSelectNonSmi(masm, &exit, 0x40, 0, -1);
+ TestSelectNonSmi(masm, &exit, 0x50, -1, 0);
+ TestSelectNonSmi(masm, &exit, 0x60, -1, -1);
+ TestSelectNonSmi(masm, &exit, 0x70, 1, 1);
+ TestSelectNonSmi(masm, &exit, 0x80, Smi::kMinValue, Smi::kMaxValue);
+ TestSelectNonSmi(masm, &exit, 0x90, Smi::kMinValue, Smi::kMinValue);
+
+ __ xor_(rax, rax); // Success.
+ __ bind(&exit);
+ __ ret(0);
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F0>(buffer)();
+ CHECK_EQ(0, result);
+}
+
+
+// Checks SmiAnd/SmiAndConstant: the result must equal Smi(x & y) and a
+// source register distinct from the destination must be left untouched;
+// the in-place forms are checked as well.
+void TestSmiAnd(MacroAssembler* masm, Label* exit, int id, int x, int y) {
+ int result = x & y;
+
+ __ movl(rax, Immediate(id));
+
+ __ Move(rcx, Smi::FromInt(x));
+ __ movq(r11, rcx);
+ __ Move(rdx, Smi::FromInt(y));
+ __ Move(r8, Smi::FromInt(result));
+ __ SmiAnd(r9, rcx, rdx);
+ __ SmiCompare(r8, r9);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ SmiCompare(r11, rcx);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ SmiAnd(rcx, rcx, rdx);
+ __ SmiCompare(r8, rcx);
+ __ j(not_equal, exit);
+
+ __ movq(rcx, r11);
+ __ incq(rax);
+ __ SmiAndConstant(r9, rcx, Smi::FromInt(y));
+ __ SmiCompare(r8, r9);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ SmiCompare(r11, rcx);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ SmiAndConstant(rcx, rcx, Smi::FromInt(y));
+ __ SmiCompare(r8, rcx);
+ __ j(not_equal, exit);
+}
+
+
+// Runs the assembled SmiAnd cases; returns 0 on success, otherwise the rax
+// id of the failing check.
+TEST(SmiAnd) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles;
+ MacroAssembler assembler(buffer, actual_size);
+
+ MacroAssembler* masm = &assembler;
+ masm->set_allow_stub_calls(false);
+ Label exit;
+
+ TestSmiAnd(masm, &exit, 0x10, 0, 0);
+ TestSmiAnd(masm, &exit, 0x20, 0, 1);
+ TestSmiAnd(masm, &exit, 0x30, 1, 0);
+ TestSmiAnd(masm, &exit, 0x40, 0, -1);
+ TestSmiAnd(masm, &exit, 0x50, -1, 0);
+ TestSmiAnd(masm, &exit, 0x60, -1, -1);
+ TestSmiAnd(masm, &exit, 0x70, 1, 1);
+ TestSmiAnd(masm, &exit, 0x80, Smi::kMinValue, Smi::kMaxValue);
+ TestSmiAnd(masm, &exit, 0x90, Smi::kMinValue, Smi::kMinValue);
+ TestSmiAnd(masm, &exit, 0xA0, Smi::kMinValue, -1);
+ // NOTE(review): duplicates the 0xA0 case — possibly another pair was
+ // intended; confirm.
+ TestSmiAnd(masm, &exit, 0xB0, Smi::kMinValue, -1);
+
+ __ xor_(rax, rax); // Success.
+ __ bind(&exit);
+ __ ret(0);
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F0>(buffer)();
+ CHECK_EQ(0, result);
+}
+
+
+// Checks SmiOr/SmiOrConstant: the result must equal Smi(x | y) and a source
+// register distinct from the destination must be preserved; in-place forms
+// are checked as well.
+void TestSmiOr(MacroAssembler* masm, Label* exit, int id, int x, int y) {
+ int result = x | y;
+
+ __ movl(rax, Immediate(id));
+
+ __ Move(rcx, Smi::FromInt(x));
+ __ movq(r11, rcx);
+ __ Move(rdx, Smi::FromInt(y));
+ __ Move(r8, Smi::FromInt(result));
+ __ SmiOr(r9, rcx, rdx);
+ __ SmiCompare(r8, r9);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ SmiCompare(r11, rcx);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ SmiOr(rcx, rcx, rdx);
+ __ SmiCompare(r8, rcx);
+ __ j(not_equal, exit);
+
+ __ movq(rcx, r11);
+ __ incq(rax);
+ __ SmiOrConstant(r9, rcx, Smi::FromInt(y));
+ __ SmiCompare(r8, r9);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ SmiCompare(r11, rcx);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ SmiOrConstant(rcx, rcx, Smi::FromInt(y));
+ __ SmiCompare(r8, rcx);
+ __ j(not_equal, exit);
+}
+
+
+// Runs the assembled SmiOr cases; returns 0 on success, otherwise the rax id
+// of the failing check.
+TEST(SmiOr) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles;
+ MacroAssembler assembler(buffer, actual_size);
+
+ MacroAssembler* masm = &assembler;
+ masm->set_allow_stub_calls(false);
+ Label exit;
+
+ TestSmiOr(masm, &exit, 0x10, 0, 0);
+ TestSmiOr(masm, &exit, 0x20, 0, 1);
+ TestSmiOr(masm, &exit, 0x30, 1, 0);
+ TestSmiOr(masm, &exit, 0x40, 0, -1);
+ TestSmiOr(masm, &exit, 0x50, -1, 0);
+ TestSmiOr(masm, &exit, 0x60, -1, -1);
+ TestSmiOr(masm, &exit, 0x70, 1, 1);
+ TestSmiOr(masm, &exit, 0x80, Smi::kMinValue, Smi::kMaxValue);
+ TestSmiOr(masm, &exit, 0x90, Smi::kMinValue, Smi::kMinValue);
+ TestSmiOr(masm, &exit, 0xA0, Smi::kMinValue, -1);
+ TestSmiOr(masm, &exit, 0xB0, 0x05555555, 0x01234567);
+ TestSmiOr(masm, &exit, 0xC0, 0x05555555, 0x0fedcba9);
+ // NOTE(review): duplicates the 0xA0 case — confirm intent.
+ TestSmiOr(masm, &exit, 0xD0, Smi::kMinValue, -1);
+
+ __ xor_(rax, rax); // Success.
+ __ bind(&exit);
+ __ ret(0);
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F0>(buffer)();
+ CHECK_EQ(0, result);
+}
+
+
+// Checks SmiXor/SmiXorConstant: the result must equal Smi(x ^ y) and a
+// source register distinct from the destination must be preserved; in-place
+// forms are checked as well.
+void TestSmiXor(MacroAssembler* masm, Label* exit, int id, int x, int y) {
+ int result = x ^ y;
+
+ __ movl(rax, Immediate(id));
+
+ __ Move(rcx, Smi::FromInt(x));
+ __ movq(r11, rcx);
+ __ Move(rdx, Smi::FromInt(y));
+ __ Move(r8, Smi::FromInt(result));
+ __ SmiXor(r9, rcx, rdx);
+ __ SmiCompare(r8, r9);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ SmiCompare(r11, rcx);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ SmiXor(rcx, rcx, rdx);
+ __ SmiCompare(r8, rcx);
+ __ j(not_equal, exit);
+
+ __ movq(rcx, r11);
+ __ incq(rax);
+ __ SmiXorConstant(r9, rcx, Smi::FromInt(y));
+ __ SmiCompare(r8, r9);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ SmiCompare(r11, rcx);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ SmiXorConstant(rcx, rcx, Smi::FromInt(y));
+ __ SmiCompare(r8, rcx);
+ __ j(not_equal, exit);
+}
+
+
+// Runs the assembled SmiXor cases; returns 0 on success, otherwise the rax
+// id of the failing check.
+TEST(SmiXor) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles;
+ MacroAssembler assembler(buffer, actual_size);
+
+ MacroAssembler* masm = &assembler;
+ masm->set_allow_stub_calls(false);
+ Label exit;
+
+ TestSmiXor(masm, &exit, 0x10, 0, 0);
+ TestSmiXor(masm, &exit, 0x20, 0, 1);
+ TestSmiXor(masm, &exit, 0x30, 1, 0);
+ TestSmiXor(masm, &exit, 0x40, 0, -1);
+ TestSmiXor(masm, &exit, 0x50, -1, 0);
+ TestSmiXor(masm, &exit, 0x60, -1, -1);
+ TestSmiXor(masm, &exit, 0x70, 1, 1);
+ TestSmiXor(masm, &exit, 0x80, Smi::kMinValue, Smi::kMaxValue);
+ TestSmiXor(masm, &exit, 0x90, Smi::kMinValue, Smi::kMinValue);
+ TestSmiXor(masm, &exit, 0xA0, Smi::kMinValue, -1);
+ TestSmiXor(masm, &exit, 0xB0, 0x5555555, 0x01234567);
+ TestSmiXor(masm, &exit, 0xC0, 0x5555555, 0x0fedcba9);
+ // NOTE(review): duplicates the 0xA0 case — confirm intent.
+ TestSmiXor(masm, &exit, 0xD0, Smi::kMinValue, -1);
+
+ __ xor_(rax, rax); // Success.
+ __ bind(&exit);
+ __ ret(0);
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F0>(buffer)();
+ CHECK_EQ(0, result);
+}
+
+
+// Checks SmiNot: the result must equal Smi(~x), both into a separate
+// destination (source preserved) and in place.
+void TestSmiNot(MacroAssembler* masm, Label* exit, int id, int x) {
+ int result = ~x;
+ __ movl(rax, Immediate(id));
+
+ __ Move(r8, Smi::FromInt(result));
+ __ Move(rcx, Smi::FromInt(x));
+ __ movq(r11, rcx);
+
+ __ SmiNot(r9, rcx);
+ __ SmiCompare(r9, r8);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ SmiCompare(r11, rcx);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ SmiNot(rcx, rcx);
+ __ SmiCompare(rcx, r8);
+ __ j(not_equal, exit);
+}
+
+
+// Runs the assembled SmiNot cases; returns 0 on success, otherwise the rax
+// id of the failing check.
+TEST(SmiNot) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles;
+ MacroAssembler assembler(buffer, actual_size);
+
+ MacroAssembler* masm = &assembler;
+ masm->set_allow_stub_calls(false);
+ Label exit;
+
+ TestSmiNot(masm, &exit, 0x10, 0);
+ TestSmiNot(masm, &exit, 0x20, 1);
+ TestSmiNot(masm, &exit, 0x30, -1);
+ TestSmiNot(masm, &exit, 0x40, 127);
+ TestSmiNot(masm, &exit, 0x50, 65535);
+ TestSmiNot(masm, &exit, 0x60, Smi::kMinValue);
+ TestSmiNot(masm, &exit, 0x70, Smi::kMaxValue);
+ TestSmiNot(masm, &exit, 0x80, 0x05555555);
+
+ __ xor_(rax, rax); // Success.
+ __ bind(&exit);
+ __ ret(0);
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F0>(buffer)();
+ CHECK_EQ(0, result);
+}
+
+
+// Checks SmiShiftLeftConstant and SmiShiftLeft (register count) for shift
+// amounts up to kSmiValueSize - 1.  When x << shift still fits in a Smi the
+// result is checked against Smi(x << shift); otherwise (only possible with
+// 31-bit smis) the operation must take the bailout label and preserve the
+// source.  rax = id + step count identifies a failing check.
+void TestSmiShiftLeft(MacroAssembler* masm, Label* exit, int id, int x) {
+ const int shifts[] = { 0, 1, 7, 24, kSmiValueSize - 1};
+ const int kNumShifts = 5;
+ __ movl(rax, Immediate(id));
+ for (int i = 0; i < kNumShifts; i++) {
+ // rax == id + i * 10.
+ int shift = shifts[i];
+ int result = x << shift;
+ if (Smi::IsValid(result)) {
+ __ Move(r8, Smi::FromInt(result));
+ __ Move(rcx, Smi::FromInt(x));
+ __ SmiShiftLeftConstant(r9, rcx, shift, exit);
+
+ __ incq(rax);
+ __ SmiCompare(r9, r8);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ Move(rcx, Smi::FromInt(x));
+ __ SmiShiftLeftConstant(rcx, rcx, shift, exit);
+
+ __ incq(rax);
+ __ SmiCompare(rcx, r8);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ Move(rdx, Smi::FromInt(x));
+ __ Move(rcx, Smi::FromInt(shift));
+ __ SmiShiftLeft(r9, rdx, rcx, exit);
+
+ __ incq(rax);
+ __ SmiCompare(r9, r8);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ Move(rdx, Smi::FromInt(x));
+ __ Move(r11, Smi::FromInt(shift));
+ __ SmiShiftLeft(r9, rdx, r11, exit);
+
+ __ incq(rax);
+ __ SmiCompare(r9, r8);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ Move(rdx, Smi::FromInt(x));
+ __ Move(r11, Smi::FromInt(shift));
+ __ SmiShiftLeft(rdx, rdx, r11, exit);
+
+ __ incq(rax);
+ __ SmiCompare(rdx, r8);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ } else {
+ // Cannot happen with long smis.
+ Label fail_ok;
+ __ Move(rcx, Smi::FromInt(x));
+ __ movq(r11, rcx);
+ __ SmiShiftLeftConstant(r9, rcx, shift, &fail_ok);
+ __ jmp(exit);
+ __ bind(&fail_ok);
+
+ __ incq(rax);
+ __ SmiCompare(rcx, r11);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ Label fail_ok2;
+ __ SmiShiftLeftConstant(rcx, rcx, shift, &fail_ok2);
+ __ jmp(exit);
+ __ bind(&fail_ok2);
+
+ __ incq(rax);
+ __ SmiCompare(rcx, r11);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ Move(r8, Smi::FromInt(shift));
+ Label fail_ok3;
+ __ SmiShiftLeft(r9, rcx, r8, &fail_ok3);
+ __ jmp(exit);
+ __ bind(&fail_ok3);
+
+ __ incq(rax);
+ __ SmiCompare(rcx, r11);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ Move(r8, Smi::FromInt(shift));
+ __ movq(rdx, r11);
+ Label fail_ok4;
+ __ SmiShiftLeft(rdx, rdx, r8, &fail_ok4);
+ __ jmp(exit);
+ __ bind(&fail_ok4);
+
+ __ incq(rax);
+ __ SmiCompare(rdx, r11);
+ __ j(not_equal, exit);
+
+ // Keep rax in lock-step with the success branch's step count.
+ __ addq(rax, Immediate(3));
+ }
+ }
+}
+
+
+// Runs the assembled SmiShiftLeft cases; returns 0 on success, otherwise the
+// rax id of the failing check.
+TEST(SmiShiftLeft) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 3,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles;
+ MacroAssembler assembler(buffer, actual_size);
+
+ MacroAssembler* masm = &assembler;
+ masm->set_allow_stub_calls(false);
+ Label exit;
+
+ TestSmiShiftLeft(masm, &exit, 0x10, 0);
+ TestSmiShiftLeft(masm, &exit, 0x50, 1);
+ TestSmiShiftLeft(masm, &exit, 0x90, 127);
+ TestSmiShiftLeft(masm, &exit, 0xD0, 65535);
+ TestSmiShiftLeft(masm, &exit, 0x110, Smi::kMaxValue);
+ TestSmiShiftLeft(masm, &exit, 0x150, Smi::kMinValue);
+ TestSmiShiftLeft(masm, &exit, 0x190, -1);
+
+ __ xor_(rax, rax); // Success.
+ __ bind(&exit);
+ __ ret(0);
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F0>(buffer)();
+ CHECK_EQ(0, result);
+}
+
+
+// Checks SmiShiftLogicalRightConstant and SmiShiftLogicalRight: the result
+// of the unsigned shift must equal Smi((unsigned)x >> shift) when that value
+// is a valid Smi; otherwise (only possible with 31-bit smis, e.g. a negative
+// x shifted by 0) the operation must take the bailout label and preserve the
+// source register.
+void TestSmiShiftLogicalRight(MacroAssembler* masm,
+ Label* exit,
+ int id,
+ int x) {
+ const int shifts[] = { 0, 1, 7, 24, kSmiValueSize - 1};
+ const int kNumShifts = 5;
+ __ movl(rax, Immediate(id));
+ for (int i = 0; i < kNumShifts; i++) {
+ int shift = shifts[i];
+ intptr_t result = static_cast<unsigned int>(x) >> shift;
+ if (Smi::IsValid(result)) {
+ __ Move(r8, Smi::FromInt(result));
+ __ Move(rcx, Smi::FromInt(x));
+ __ SmiShiftLogicalRightConstant(r9, rcx, shift, exit);
+
+ __ incq(rax);
+ __ SmiCompare(r9, r8);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ Move(rdx, Smi::FromInt(x));
+ __ Move(rcx, Smi::FromInt(shift));
+ __ SmiShiftLogicalRight(r9, rdx, rcx, exit);
+
+ __ incq(rax);
+ __ SmiCompare(r9, r8);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ Move(rdx, Smi::FromInt(x));
+ __ Move(r11, Smi::FromInt(shift));
+ __ SmiShiftLogicalRight(r9, rdx, r11, exit);
+
+ __ incq(rax);
+ __ SmiCompare(r9, r8);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ } else {
+ // Cannot happen with long smis.
+ Label fail_ok;
+ __ Move(rcx, Smi::FromInt(x));
+ __ movq(r11, rcx);
+ __ SmiShiftLogicalRightConstant(r9, rcx, shift, &fail_ok);
+ __ jmp(exit);
+ __ bind(&fail_ok);
+
+ __ incq(rax);
+ __ SmiCompare(rcx, r11);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ Move(r8, Smi::FromInt(shift));
+ Label fail_ok3;
+ __ SmiShiftLogicalRight(r9, rcx, r8, &fail_ok3);
+ __ jmp(exit);
+ __ bind(&fail_ok3);
+
+ __ incq(rax);
+ __ SmiCompare(rcx, r11);
+ __ j(not_equal, exit);
+
+ // Keep rax in lock-step with the success branch's step count.
+ __ addq(rax, Immediate(3));
+ }
+ }
+}
+
+
+// Runs the assembled SmiShiftLogicalRight cases; returns 0 on success,
+// otherwise the rax id of the failing check.
+TEST(SmiShiftLogicalRight) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles;
+ MacroAssembler assembler(buffer, actual_size);
+
+ MacroAssembler* masm = &assembler;
+ masm->set_allow_stub_calls(false);
+ Label exit;
+
+ TestSmiShiftLogicalRight(masm, &exit, 0x10, 0);
+ TestSmiShiftLogicalRight(masm, &exit, 0x30, 1);
+ TestSmiShiftLogicalRight(masm, &exit, 0x50, 127);
+ TestSmiShiftLogicalRight(masm, &exit, 0x70, 65535);
+ TestSmiShiftLogicalRight(masm, &exit, 0x90, Smi::kMaxValue);
+ TestSmiShiftLogicalRight(masm, &exit, 0xB0, Smi::kMinValue);
+ TestSmiShiftLogicalRight(masm, &exit, 0xD0, -1);
+
+ __ xor_(rax, rax); // Success.
+ __ bind(&exit);
+ __ ret(0);
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F0>(buffer)();
+ CHECK_EQ(0, result);
+}
+
+
+// Checks SmiShiftArithmeticRightConstant and SmiShiftArithmeticRight.
+// Arithmetic right shift of a smi can never overflow, so there is no
+// bailout label; both the constant-count and register-count forms must
+// produce Smi(x >> shift) with sign extension.
+void TestSmiShiftArithmeticRight(MacroAssembler* masm,
+ Label* exit,
+ int id,
+ int x) {
+ const int shifts[] = { 0, 1, 7, 24, kSmiValueSize - 1};
+ const int kNumShifts = 5;
+ __ movl(rax, Immediate(id));
+ for (int i = 0; i < kNumShifts; i++) {
+ int shift = shifts[i];
+ // Guaranteed arithmetic shift: C++ '>>' on negative values is
+ // implementation-defined, so compute it via the positive complement.
+ int result = (x < 0) ? ~((~x) >> shift) : (x >> shift);
+ __ Move(r8, Smi::FromInt(result));
+ __ Move(rcx, Smi::FromInt(x));
+ __ SmiShiftArithmeticRightConstant(rcx, rcx, shift);
+
+ __ SmiCompare(rcx, r8);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ __ Move(rdx, Smi::FromInt(x));
+ __ Move(r11, Smi::FromInt(shift));
+ __ SmiShiftArithmeticRight(rdx, rdx, r11);
+
+ __ SmiCompare(rdx, r8);
+ __ j(not_equal, exit);
+
+ __ incq(rax);
+ }
+}
+
+
+// Runs the assembled SmiShiftArithmeticRight cases; returns 0 on success,
+// otherwise the rax id of the failing check.
+TEST(SmiShiftArithmeticRight) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles;
+ MacroAssembler assembler(buffer, actual_size);
+
+ MacroAssembler* masm = &assembler;
+ masm->set_allow_stub_calls(false);
+ Label exit;
+
+ TestSmiShiftArithmeticRight(masm, &exit, 0x10, 0);
+ TestSmiShiftArithmeticRight(masm, &exit, 0x20, 1);
+ TestSmiShiftArithmeticRight(masm, &exit, 0x30, 127);
+ TestSmiShiftArithmeticRight(masm, &exit, 0x40, 65535);
+ TestSmiShiftArithmeticRight(masm, &exit, 0x50, Smi::kMaxValue);
+ TestSmiShiftArithmeticRight(masm, &exit, 0x60, Smi::kMinValue);
+ TestSmiShiftArithmeticRight(masm, &exit, 0x70, -1);
+
+ __ xor_(rax, rax); // Success.
+ __ bind(&exit);
+ __ ret(0);
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F0>(buffer)();
+ CHECK_EQ(0, result);
+}
+
+
+// Checks PositiveSmiTimesPowerOfTwoToInteger64: for a non-negative smi x and
+// each power in 'powers', the destination must receive the untagged 64-bit
+// integer x << power, and a source register distinct from the destination
+// must be preserved.  rax = id + step count identifies the failing check.
+void TestPositiveSmiPowerUp(MacroAssembler* masm, Label* exit, int id, int x) {
+ ASSERT(x >= 0);
+ int powers[] = { 0, 1, 2, 3, 8, 16, 24, 31 };
+ int power_count = 8;
+ __ movl(rax, Immediate(id));
+ for (int i = 0; i < power_count; i++) {
+ int power = powers[i];
+ intptr_t result = static_cast<intptr_t>(x) << power;
+ __ Set(r8, result);
+ __ Move(rcx, Smi::FromInt(x));
+ __ movq(r11, rcx);
+ __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rcx, power);
+ __ SmiCompare(rdx, r8);
+ __ j(not_equal, exit);
+ __ incq(rax);
+ __ SmiCompare(r11, rcx); // rcx unchanged.
+ __ j(not_equal, exit);
+ __ incq(rax);
+ __ PositiveSmiTimesPowerOfTwoToInteger64(rcx, rcx, power);
+ // Fixed: compare the in-place result (rcx).  The original compared rdx,
+ // which still held the previous call's result, so the in-place variant
+ // was never actually verified.
+ __ SmiCompare(rcx, r8);
+ __ j(not_equal, exit);
+ __ incq(rax);
+ }
+}
+
+
+// Runs the assembled PositiveSmiTimesPowerOfTwoToInteger64 cases; returns 0
+// on success, otherwise the rax id of the failing check.
+TEST(PositiveSmiTimesPowerOfTwoToInteger64) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer =
+ static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+ &actual_size,
+ true));
+ CHECK(buffer);
+ HandleScope handles;
+ MacroAssembler assembler(buffer, actual_size);
+
+ MacroAssembler* masm = &assembler;
+ masm->set_allow_stub_calls(false);
+ Label exit;
+
+ TestPositiveSmiPowerUp(masm, &exit, 0x20, 0);
+ TestPositiveSmiPowerUp(masm, &exit, 0x40, 1);
+ TestPositiveSmiPowerUp(masm, &exit, 0x60, 127);
+ TestPositiveSmiPowerUp(masm, &exit, 0x80, 128);
+ TestPositiveSmiPowerUp(masm, &exit, 0xA0, 255);
+ TestPositiveSmiPowerUp(masm, &exit, 0xC0, 256);
+ TestPositiveSmiPowerUp(masm, &exit, 0x100, 65535);
+ TestPositiveSmiPowerUp(masm, &exit, 0x120, 65536);
+ TestPositiveSmiPowerUp(masm, &exit, 0x140, Smi::kMaxValue);
+
+ __ xor_(rax, rax); // Success.
+ __ bind(&exit);
+ __ ret(0);
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F0>(buffer)();
+ CHECK_EQ(0, result);
+}
+
+
+#undef __
return array.sort();
}
-assertEquals(0, props({}).length);
-assertEquals(1, props({x:1}).length);
-assertEquals(2, props({x:1, y:2}).length);
+// Each assertion carries a message so a failure names the exact case.
+assertEquals(0, props({}).length, "olen0");
+assertEquals(1, props({x:1}).length, "olen1");
+assertEquals(2, props({x:1, y:2}).length, "olen2");
-assertArrayEquals(["x"], props({x:1}));
-assertArrayEquals(["x", "y"], props({x:1, y:2}));
-assertArrayEquals(["x", "y", "zoom"], props({x:1, y:2, zoom:3}));
+assertArrayEquals(["x"], props({x:1}), "x");
+assertArrayEquals(["x", "y"], props({x:1, y:2}), "xy");
+assertArrayEquals(["x", "y", "zoom"], props({x:1, y:2, zoom:3}), "xyzoom");
-assertEquals(0, props([]).length);
-assertEquals(1, props([1]).length);
-assertEquals(2, props([1,2]).length);
+assertEquals(0, props([]).length, "alen0");
+assertEquals(1, props([1]).length, "alen1");
+assertEquals(2, props([1,2]).length, "alen2");
-assertArrayEquals(["0"], props([1]));
-assertArrayEquals(["0", "1"], props([1,2]));
-assertArrayEquals(["0", "1", "2"], props([1,2,3]));
+assertArrayEquals(["0"], props([1]), "0");
+assertArrayEquals(["0", "1"], props([1,2]), "01");
+assertArrayEquals(["0", "1", "2"], props([1,2,3]), "012");
var o = {};
var a = [];
a.push(s);
o[s] = i;
}
-assertArrayEquals(a, props(o));
+assertArrayEquals(a, props(o), "charcodes");
var a = [];
-assertEquals(0, props(a).length);
+assertEquals(0, props(a).length, "proplen0");
a[Math.pow(2,30)-1] = 0;
-assertEquals(1, props(a).length);
+assertEquals(1, props(a).length, "proplen1");
a[Math.pow(2,31)-1] = 0;
-assertEquals(2, props(a).length);
+assertEquals(2, props(a).length, "proplen2");
a[1] = 0;
-assertEquals(3, props(a).length);
+assertEquals(3, props(a).length, "proplen3");
for (var hest = 'hest' in {}) { }
-assertEquals('hest', hest);
+assertEquals('hest', hest, "empty-no-override");
var result = '';
for (var p in {a : [0], b : 1}) { result += p; }
-assertEquals('ab', result);
+assertEquals('ab', result, "ab");
var result = '';
for (var p in {a : {v:1}, b : 1}) { result += p; }
-assertEquals('ab', result);
+assertEquals('ab', result, "ab-nodeep");
var result = '';
for (var p in { get a() {}, b : 1}) { result += p; }
-assertEquals('ab', result);
+assertEquals('ab', result, "abget");
var result = '';
for (var p in { get a() {}, set a(x) {}, b : 1}) { result += p; }
-assertEquals('ab', result);
+assertEquals('ab', result, "abgetset");
// Ensure that we can correctly change the sign of the most negative smi.
-assertEquals(1073741824, -1073741824 * -1);
-assertEquals(1073741824, -1073741824 / -1);
-assertEquals(1073741824, -(-1073741824));
-assertEquals(1073741824, 0 - (-1073741824));
-
-var min_smi = -1073741824;
-
-assertEquals(1073741824, min_smi * -1);
-assertEquals(1073741824, min_smi / -1);
-assertEquals(1073741824, -min_smi);
-assertEquals(1073741824, 0 - min_smi);
-
-var zero = 0;
-var minus_one = -1;
-
-assertEquals(1073741824, min_smi * minus_one);
-assertEquals(1073741824, min_smi / minus_one);
-assertEquals(1073741824, -min_smi);
-assertEquals(1073741824, zero - min_smi);
-
-assertEquals(1073741824, -1073741824 * minus_one);
-assertEquals(1073741824, -1073741824 / minus_one);
-assertEquals(1073741824, -(-1073741824));
-assertEquals(1073741824, zero - (-1073741824));
-
-var half_min_smi = -(1<<15);
-var half_max_smi = (1<<15);
-
-assertEquals(1073741824, -half_min_smi * half_max_smi);
-assertEquals(1073741824, half_min_smi * -half_max_smi);
-assertEquals(1073741824, half_max_smi * -half_min_smi);
-assertEquals(1073741824, -half_max_smi * half_min_smi);
+// Possible Smi ranges.
+var ranges = [{min: -1073741824, max: 1073741823, bits: 31},
+ {min: -2147483648, max: 2147483647, bits: 32}];
+
+for (var i = 0; i < ranges.length; i++) {
+ var range = ranges[i];
+ var min_smi = range.min;
+ var max_smi = range.max;
+ var bits = range.bits;
+ var name = bits + "-bit";
+
+ // Negating the most negative smi overflows the smi range, so the
+ // expected result is the (non-smi) value max_smi + 1.
+ var result = max_smi + 1;
+
+ // Min smi as literal
+ assertEquals(result, eval(min_smi + " * -1"), name + "-litconmult");
+ assertEquals(result, eval(min_smi + " / -1"), name + "-litcondiv");
+ assertEquals(result, eval("-(" + min_smi + ")"), name + "-litneg");
+ // Fixed: the message argument was outside the call's closing paren,
+ // leaving it as a dead comma-expression operand instead of a message.
+ assertEquals(result, eval("0 - (" + min_smi + ")"), name + "-conlitsub");
+
+ // As variable:
+ assertEquals(result, min_smi * -1, name + "-varconmult");
+ assertEquals(result, min_smi / -1, name + "-varcondiv");
+ assertEquals(result, -min_smi, name + "-varneg");
+ assertEquals(result, 0 - min_smi, name + "-convarsub");
+
+ // Only variables:
+ var zero = 0;
+ var minus_one = -1;
+
+ assertEquals(result, min_smi * minus_one, name + "-varvarmult");
+ assertEquals(result, min_smi / minus_one, name + "-varvardiv");
+ assertEquals(result, zero - min_smi, name + "-varvarsub");
+
+ // Constants as variables
+ assertEquals(result, eval(min_smi + " * minus_one"), name + "-litvarmult");
+ assertEquals(result, eval(min_smi + " / minus_one"), name + "-litvardiv");
+ assertEquals(result, eval("0 - (" + min_smi + ")"), name + "-varlitsub");
+
+ var half_min_smi = -(1 << (bits >> 1));
+ var half_max_smi = 1 << ((bits - 1) >> 1);
+
+ assertEquals(max_smi + 1, -half_min_smi * half_max_smi, name + "-half1");
+ assertEquals(max_smi + 1, half_min_smi * -half_max_smi, name + "-half2");
+ assertEquals(max_smi + 1, half_max_smi * -half_min_smi, name + "-half3");
+ assertEquals(max_smi + 1, -half_max_smi * half_min_smi, name + "-half4");
+}
// variable op variable
-assertEquals(one / (-zero), -Infinity, "one / -0 I");
+assertEquals(-Infinity, one / (-zero), "one / -0 I");
-assertEquals(one / (zero * minus_one), -Infinity, "one / -1");
-assertEquals(one / (minus_one * zero), -Infinity, "one / -0 II");
-assertEquals(one / (zero * zero), Infinity, "one / 0 I");
-assertEquals(one / (minus_one * minus_one), 1, "one / 1");
+assertEquals(-Infinity, one / (zero * minus_one), "one / -1");
+assertEquals(-Infinity, one / (minus_one * zero), "one / -0 II");
+assertEquals(Infinity, one / (zero * zero), "one / 0 I");
+assertEquals(1, one / (minus_one * minus_one), "one / 1");
-assertEquals(one / (zero / minus_one), -Infinity, "one / -0 III");
-assertEquals(one / (zero / one), Infinity, "one / 0 II");
+assertEquals(-Infinity, one / (zero / minus_one), "one / -0 III");
+assertEquals(Infinity, one / (zero / one), "one / 0 II");
-assertEquals(one / (minus_four % two), -Infinity, "foo1");
-assertEquals(one / (minus_four % minus_two), -Infinity, "foo2");
-assertEquals(one / (four % two), Infinity, "foo3");
-assertEquals(one / (four % minus_two), Infinity, "foo4");
+assertEquals(-Infinity, one / (minus_four % two), "foo1");
+assertEquals(-Infinity, one / (minus_four % minus_two), "foo2");
+assertEquals(Infinity, one / (four % two), "foo3");
+assertEquals(Infinity, one / (four % minus_two), "foo4");
// literal op variable
-assertEquals(one / (0 * minus_one), -Infinity, "bar1");
-assertEquals(one / (-1 * zero), -Infinity, "bar2");
-assertEquals(one / (0 * zero), Infinity, "bar3");
-assertEquals(one / (-1 * minus_one), 1, "bar4");
+assertEquals(-Infinity, one / (0 * minus_one), "bar1");
+assertEquals(-Infinity, one / (-1 * zero), "bar2");
+assertEquals(Infinity, one / (0 * zero), "bar3");
+assertEquals(1, one / (-1 * minus_one), "bar4");
-assertEquals(one / (0 / minus_one), -Infinity, "baz1");
-assertEquals(one / (0 / one), Infinity, "baz2");
+assertEquals(-Infinity, one / (0 / minus_one), "baz1");
+assertEquals(Infinity, one / (0 / one), "baz2");
-assertEquals(one / (-4 % two), -Infinity, "baz3");
-assertEquals(one / (-4 % minus_two), -Infinity, "baz4");
-assertEquals(one / (4 % two), Infinity, "baz5");
-assertEquals(one / (4 % minus_two), Infinity, "baz6");
+assertEquals(-Infinity, one / (-4 % two), "baz3");
+assertEquals(-Infinity, one / (-4 % minus_two), "baz4");
+assertEquals(Infinity, one / (4 % two), "baz5");
+assertEquals(Infinity, one / (4 % minus_two), "baz6");
// variable op literal
-assertEquals(one / (zero * -1), -Infinity, "fizz1");
-assertEquals(one / (minus_one * 0), -Infinity, "fizz2");
-assertEquals(one / (zero * 0), Infinity, "fizz3");
-assertEquals(one / (minus_one * -1), 1, "fizz4");
+assertEquals(-Infinity, one / (zero * -1), "fizz1");
+assertEquals(-Infinity, one / (minus_one * 0), "fizz2");
+assertEquals(Infinity, one / (zero * 0), "fizz3");
+assertEquals(1, one / (minus_one * -1), "fizz4");
-assertEquals(one / (zero / -1), -Infinity, "buzz1");
-assertEquals(one / (zero / 1), Infinity, "buzz2");
+assertEquals(-Infinity, one / (zero / -1), "buzz1");
+assertEquals(Infinity, one / (zero / 1), "buzz2");
-assertEquals(one / (minus_four % 2), -Infinity, "buzz3");
-assertEquals(one / (minus_four % -2), -Infinity, "buzz4");
-assertEquals(one / (four % 2), Infinity, "buzz5");
-assertEquals(one / (four % -2), Infinity, "buzz6");
+assertEquals(-Infinity, one / (minus_four % 2), "buzz3");
+assertEquals(-Infinity, one / (minus_four % -2), "buzz4");
+assertEquals(Infinity, one / (four % 2), "buzz5");
+assertEquals(Infinity, one / (four % -2), "buzz6");
// literal op literal
-assertEquals(one / (-0), -Infinity, "fisk1");
+assertEquals(-Infinity, one / (-0), "fisk1");
-assertEquals(one / (0 * -1), -Infinity, "fisk2");
-assertEquals(one / (-1 * 0), -Infinity, "fisk3");
-assertEquals(one / (0 * 0), Infinity, "fisk4");
-assertEquals(one / (-1 * -1), 1, "fisk5");
+assertEquals(-Infinity, one / (0 * -1), "fisk2");
+assertEquals(-Infinity, one / (-1 * 0), "fisk3");
+assertEquals(Infinity, one / (0 * 0), "fisk4");
+assertEquals(1, one / (-1 * -1), "fisk5");
-assertEquals(one / (0 / -1), -Infinity, "hest1");
-assertEquals(one / (0 / 1), Infinity, "hest2");
+assertEquals(-Infinity, one / (0 / -1), "hest1");
+assertEquals(Infinity, one / (0 / 1), "hest2");
-assertEquals(one / (-4 % 2), -Infinity, "fiskhest1");
-assertEquals(one / (-4 % -2), -Infinity, "fiskhest2");
-assertEquals(one / (4 % 2), Infinity, "fiskhest3");
-assertEquals(one / (4 % -2), Infinity, "fiskhest4");
+assertEquals(-Infinity, one / (-4 % 2), "fiskhest1");
+assertEquals(-Infinity, one / (-4 % -2), "fiskhest2");
+assertEquals(Infinity, one / (4 % 2), "fiskhest3");
+assertEquals(Infinity, one / (4 % -2), "fiskhest4");