Operand Operand::UntagSmi(Register smi) {
- STATIC_ASSERT((kSmiShift + kSmiValueSize) == kXRegSizeInBits);
ASSERT(smi.Is64Bits());
return Operand(smi, ASR, kSmiShift);
}
Operand Operand::UntagSmiAndScale(Register smi, int scale) {
- STATIC_ASSERT((kSmiShift + kSmiValueSize) == kXRegSizeInBits);
ASSERT(smi.Is64Bits());
ASSERT((scale >= 0) && (scale <= (64 - kSmiValueSize)));
if (scale > kSmiShift) {
// TODO(jbramley): Check that the stack usage here is safe.
__ Sub(x10, jssp, x10);
// Check if the arguments will overflow the stack.
- __ Cmp(x10, Operand::UntagSmiAndScale(argc, kPointerSizeLog2));
+ __ Cmp(x10, Operand(argc, LSR, kSmiShift - kPointerSizeLog2));
__ B(gt, &enough_stack_space);
// There is not enough stack space, so use a builtin to throw an appropriate
// error.
Register caller_fp = x10;
__ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
// Load and untag the context.
- __ Ldr(w11, UntagSmiMemOperand(caller_fp,
- StandardFrameConstants::kContextOffset));
+ STATIC_ASSERT((kSmiShift / kBitsPerByte) == 4);
+ __ Ldr(w11, MemOperand(caller_fp, StandardFrameConstants::kContextOffset +
+ (kSmiShift / kBitsPerByte)));
__ Cmp(w11, StackFrame::ARGUMENTS_ADAPTOR);
__ B(ne, &runtime);
// Store the smi values in the last match info.
__ SmiTag(x10, current_offset);
// Clearing the 32 bottom bits gives us a Smi.
- STATIC_ASSERT(kSmiTag == 0);
- __ Bic(x11, current_offset, kSmiShiftMask);
+ STATIC_ASSERT(kSmiShift == 32);
+ __ And(x11, current_offset, ~kWRegMask);
__ Stp(x10,
x11,
MemOperand(last_match_offsets, kXRegSize * 2, PostIndex));
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
// At this point code register contains smi tagged ASCII char code.
- __ Add(result_, result_, Operand::UntagSmiAndScale(code_, kPointerSizeLog2));
+ STATIC_ASSERT(kSmiShift > kPointerSizeLog2);
+ __ Add(result_, result_, Operand(code_, LSR, kSmiShift - kPointerSizeLog2));
__ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
__ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_);
__ Bind(&exit_);
// hash = character + (character << 10);
__ LoadRoot(hash, Heap::kHashSeedRootIndex);
// Untag smi seed and add the character.
- __ Add(hash, character, Operand::UntagSmi(hash));
+ __ Add(hash, character, Operand(hash, LSR, kSmiShift));
// Compute hashes modulo 2^32 using a 32-bit W register.
Register hash_w = hash.W();
// jssp[8]: 0x00000000 (SMI tag & padding)
// jssp[4]: reg[31:0]
// jssp[0]: 0x00000000 (SMI tag & padding)
- STATIC_ASSERT((kSmiTag == 0) && (kSmiShift == kWRegSizeInBits));
+ STATIC_ASSERT((kSmiTag == 0) && (kSmiShift == 32));
}
if (object_regs != 0) {
break;
case Token::MUL: {
Label not_minus_zero, done;
- STATIC_ASSERT(kSmiShift == (kXRegSizeInBits / 2));
- STATIC_ASSERT(kSmiTag == 0);
__ Smulh(x10, left, right);
__ Cbnz(x10, &not_minus_zero);
__ Eor(x11, left, right);
__ Tbnz(x11, kXSignBit, &stub_call);
+ STATIC_ASSERT(kSmiTag == 0);
__ Mov(result, x10);
__ B(&done);
__ Bind(&not_minus_zero);
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- uint64_t sign_mask = V8_UINT64_C(1) << (kSmiShift + kSmiValueSize - 1);
-
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ TestAndSplit(x0, kSmiTagMask | sign_mask, if_true, if_false, fall_through);
+ __ TestAndSplit(x0, kSmiTagMask | (V8_UINT64_C(0x80000000) << kSmiShift),
+ if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
ElementsKind elements_kind,
Representation representation,
int base_offset) {
- STATIC_ASSERT((kSmiValueSize == kWRegSizeInBits) && (kSmiTag == 0));
+ STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
int element_size_shift = ElementsKindToShiftSize(elements_kind);
// Even though the HLoad/StoreKeyed instructions force the input
__ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
if (representation.IsInteger32()) {
ASSERT(elements_kind == FAST_SMI_ELEMENTS);
- // Read or write only the smi payload in the case of fast smi arrays.
+ // Read or write only the most-significant 32 bits in the case of fast smi
+ // arrays.
return UntagSmiMemOperand(base, base_offset);
} else {
return MemOperand(base, base_offset);
ASSERT((element_size_shift >= 0) && (element_size_shift <= 4));
if (representation.IsInteger32()) {
ASSERT(elements_kind == FAST_SMI_ELEMENTS);
- // Read or write only the smi payload in the case of fast smi arrays.
+ // Read or write only the most-significant 32 bits in the case of fast smi
+ // arrays.
__ Add(base, elements, Operand(key, SXTW, element_size_shift));
return UntagSmiMemOperand(base, base_offset);
} else {
ToInteger32(const_operand) * kPointerSize;
if (representation.IsInteger32()) {
ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
- STATIC_ASSERT((kSmiValueSize == kWRegSizeInBits) && (kSmiTag == 0));
+ STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) &&
+ (kSmiTag == 0));
mem_op = UntagSmiMemOperand(elements, offset);
} else {
mem_op = MemOperand(elements, offset);
if (access.representation().IsSmi() &&
instr->hydrogen()->representation().IsInteger32()) {
// Read int value directly from upper half of the smi.
- STATIC_ASSERT((kSmiValueSize == kWRegSizeInBits) && (kSmiTag == 0));
+ STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
__ Load(result, UntagSmiFieldMemOperand(source, offset),
Representation::Integer32());
} else {
if (representation.IsInteger32()) {
ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
- STATIC_ASSERT((kSmiValueSize == kWRegSizeInBits) && (kSmiTag == 0));
+ STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) &&
+ (kSmiTag == 0));
mem_op = UntagSmiMemOperand(store_base, offset);
} else {
mem_op = MemOperand(store_base, offset);
__ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
}
#endif
- STATIC_ASSERT((kSmiValueSize == kWRegSizeInBits) && (kSmiTag == 0));
+ STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
__ Store(value, UntagSmiFieldMemOperand(destination, offset),
Representation::Integer32());
} else {
void MacroAssembler::SmiTag(Register dst, Register src) {
- STATIC_ASSERT(kSmiValueSize + kSmiShift == kXRegSizeInBits);
ASSERT(dst.Is64Bits() && src.Is64Bits());
Lsl(dst, src, kSmiShift);
}
void MacroAssembler::SmiUntag(Register dst, Register src) {
- STATIC_ASSERT(kSmiValueSize + kSmiShift == kXRegSizeInBits);
ASSERT(dst.Is64Bits() && src.Is64Bits());
if (FLAG_enable_slow_asserts) {
AssertSmi(src);
void MacroAssembler::SmiTagAndPush(Register src) {
- STATIC_ASSERT((kSmiShift == kWRegSizeInBits) &&
- (kSmiValueSize == kWRegSizeInBits) &&
- (kSmiTag == 0));
+ STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
Push(src.W(), wzr);
}
void MacroAssembler::SmiTagAndPush(Register src1, Register src2) {
- STATIC_ASSERT((kSmiShift == kWRegSizeInBits) &&
- (kSmiValueSize == kWRegSizeInBits) &&
- (kSmiTag == 0));
+ STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
Push(src1.W(), wzr, src2.W(), wzr);
}