From 3f46e4b0468a704fb5aed576f831b115852c2f6d Mon Sep 17 00:00:00 2001
From: "dslomov@chromium.org"
Date: Mon, 7 Jul 2014 14:29:07 +0000
Subject: [PATCH] Revert "ARM64: Use UntagSmi helpers and clean up assertions."

This reverts commit r22247 for breaking Linux arm64 build.

TBR=Jacob.Bramley@arm.com

Review URL: https://codereview.chromium.org/376553006

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22249 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---
 src/arm64/assembler-arm64-inl.h       |  2 --
 src/arm64/builtins-arm64.cc           |  2 +-
 src/arm64/code-stubs-arm64.cc         | 14 ++++++++------
 src/arm64/debug-arm64.cc              |  2 +-
 src/arm64/full-codegen-arm64.cc       |  8 +++-----
 src/arm64/lithium-codegen-arm64.cc    | 18 +++++++++++-------
 src/arm64/macro-assembler-arm64-inl.h | 10 ++--------
 7 files changed, 26 insertions(+), 30 deletions(-)

diff --git a/src/arm64/assembler-arm64-inl.h b/src/arm64/assembler-arm64-inl.h
index 960fe63..444f2c4 100644
--- a/src/arm64/assembler-arm64-inl.h
+++ b/src/arm64/assembler-arm64-inl.h
@@ -430,14 +430,12 @@ unsigned Operand::shift_amount() const {
 
 
 Operand Operand::UntagSmi(Register smi) {
-  STATIC_ASSERT((kSmiShift + kSmiValueSize) == kXRegSizeInBits);
   ASSERT(smi.Is64Bits());
   return Operand(smi, ASR, kSmiShift);
 }
 
 
 Operand Operand::UntagSmiAndScale(Register smi, int scale) {
-  STATIC_ASSERT((kSmiShift + kSmiValueSize) == kXRegSizeInBits);
   ASSERT(smi.Is64Bits());
   ASSERT((scale >= 0) && (scale <= (64 - kSmiValueSize)));
   if (scale > kSmiShift) {
diff --git a/src/arm64/builtins-arm64.cc b/src/arm64/builtins-arm64.cc
index 9c5e3e9..31b61f4 100644
--- a/src/arm64/builtins-arm64.cc
+++ b/src/arm64/builtins-arm64.cc
@@ -1246,7 +1246,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
     // TODO(jbramley): Check that the stack usage here is safe.
     __ Sub(x10, jssp, x10);
     // Check if the arguments will overflow the stack.
-    __ Cmp(x10, Operand::UntagSmiAndScale(argc, kPointerSizeLog2));
+    __ Cmp(x10, Operand(argc, LSR, kSmiShift - kPointerSizeLog2));
     __ B(gt, &enough_stack_space);
     // There is not enough stack space, so use a builtin to throw an appropriate
     // error.
diff --git a/src/arm64/code-stubs-arm64.cc b/src/arm64/code-stubs-arm64.cc
index e24c7bd..0e08907 100644
--- a/src/arm64/code-stubs-arm64.cc
+++ b/src/arm64/code-stubs-arm64.cc
@@ -1988,8 +1988,9 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
   Register caller_fp = x10;
   __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   // Load and untag the context.
-  __ Ldr(w11, UntagSmiMemOperand(caller_fp,
-                                 StandardFrameConstants::kContextOffset));
+  STATIC_ASSERT((kSmiShift / kBitsPerByte) == 4);
+  __ Ldr(w11, MemOperand(caller_fp, StandardFrameConstants::kContextOffset +
+                         (kSmiShift / kBitsPerByte)));
   __ Cmp(w11, StackFrame::ARGUMENTS_ADAPTOR);
   __ B(ne, &runtime);
 
@@ -2837,8 +2838,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Store the smi values in the last match info.
   __ SmiTag(x10, current_offset);
   // Clearing the 32 bottom bits gives us a Smi.
-  STATIC_ASSERT(kSmiTag == 0);
-  __ Bic(x11, current_offset, kSmiShiftMask);
+  STATIC_ASSERT(kSmiShift == 32);
+  __ And(x11, current_offset, ~kWRegMask);
   __ Stp(x10, x11,
          MemOperand(last_match_offsets, kXRegSize * 2, PostIndex));
 
@@ -3477,7 +3478,8 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
 
   __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
   // At this point code register contains smi tagged ASCII char code.
-  __ Add(result_, result_, Operand::UntagSmiAndScale(code_, kPointerSizeLog2));
+  STATIC_ASSERT(kSmiShift > kPointerSizeLog2);
+  __ Add(result_, result_, Operand(code_, LSR, kSmiShift - kPointerSizeLog2));
   __ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
   __ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_);
   __ Bind(&exit_);
@@ -3846,7 +3848,7 @@ void StringHelper::GenerateHashInit(MacroAssembler* masm,
   // hash = character + (character << 10);
   __ LoadRoot(hash, Heap::kHashSeedRootIndex);
   // Untag smi seed and add the character.
-  __ Add(hash, character, Operand::UntagSmi(hash));
+  __ Add(hash, character, Operand(hash, LSR, kSmiShift));
 
   // Compute hashes modulo 2^32 using a 32-bit W register.
   Register hash_w = hash.W();
diff --git a/src/arm64/debug-arm64.cc b/src/arm64/debug-arm64.cc
index 83b0606..43684d5 100644
--- a/src/arm64/debug-arm64.cc
+++ b/src/arm64/debug-arm64.cc
@@ -171,7 +171,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
     //   jssp[8]: 0x00000000 (SMI tag & padding)
     //   jssp[4]: reg[31:0]
     //   jssp[0]: 0x00000000 (SMI tag & padding)
-    STATIC_ASSERT((kSmiTag == 0) && (kSmiShift == kWRegSizeInBits));
+    STATIC_ASSERT((kSmiTag == 0) && (kSmiShift == 32));
   }
 
   if (object_regs != 0) {
diff --git a/src/arm64/full-codegen-arm64.cc b/src/arm64/full-codegen-arm64.cc
index 80b004c..438907e 100644
--- a/src/arm64/full-codegen-arm64.cc
+++ b/src/arm64/full-codegen-arm64.cc
@@ -2029,12 +2029,11 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
       break;
     case Token::MUL: {
       Label not_minus_zero, done;
-      STATIC_ASSERT(kSmiShift == (kXRegSizeInBits / 2));
-      STATIC_ASSERT(kSmiTag == 0);
       __ Smulh(x10, left, right);
       __ Cbnz(x10, &not_minus_zero);
       __ Eor(x11, left, right);
       __ Tbnz(x11, kXSignBit, &stub_call);
+      STATIC_ASSERT(kSmiTag == 0);
       __ Mov(result, x10);
       __ B(&done);
       __ Bind(&not_minus_zero);
@@ -2593,10 +2592,9 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
   context()->PrepareTest(&materialize_true, &materialize_false,
                         &if_true, &if_false, &fall_through);
 
-  uint64_t sign_mask = V8_UINT64_C(1) << (kSmiShift + kSmiValueSize - 1);
-
   PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  __ TestAndSplit(x0, kSmiTagMask | sign_mask, if_true, if_false, fall_through);
+  __ TestAndSplit(x0, kSmiTagMask | (0x80000000UL << kSmiShift), if_true,
+                  if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
diff --git a/src/arm64/lithium-codegen-arm64.cc b/src/arm64/lithium-codegen-arm64.cc
index 61df824..ed6fde3 100644
--- a/src/arm64/lithium-codegen-arm64.cc
+++ b/src/arm64/lithium-codegen-arm64.cc
@@ -3525,7 +3525,7 @@ MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base,
                                               ElementsKind elements_kind,
                                               Representation representation,
                                               int base_offset) {
-  STATIC_ASSERT((kSmiValueSize == kWRegSizeInBits) && (kSmiTag == 0));
+  STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
   int element_size_shift = ElementsKindToShiftSize(elements_kind);
 
   // Even though the HLoad/StoreKeyed instructions force the input
@@ -3536,7 +3536,8 @@ MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base,
     __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
     if (representation.IsInteger32()) {
       ASSERT(elements_kind == FAST_SMI_ELEMENTS);
-      // Read or write only the smi payload in the case of fast smi arrays.
+      // Read or write only the most-significant 32 bits in the case of fast smi
+      // arrays.
      return UntagSmiMemOperand(base, base_offset);
    } else {
      return MemOperand(base, base_offset);
    }
@@ -3547,7 +3548,8 @@ MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base,
     ASSERT((element_size_shift >= 0) && (element_size_shift <= 4));
     if (representation.IsInteger32()) {
       ASSERT(elements_kind == FAST_SMI_ELEMENTS);
-      // Read or write only the smi payload in the case of fast smi arrays.
+      // Read or write only the most-significant 32 bits in the case of fast smi
+      // arrays.
       __ Add(base, elements, Operand(key, SXTW, element_size_shift));
       return UntagSmiMemOperand(base, base_offset);
     } else {
@@ -3610,7 +3612,8 @@ void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
                  ToInteger32(const_operand) * kPointerSize;
     if (representation.IsInteger32()) {
       ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
-      STATIC_ASSERT((kSmiValueSize == kWRegSizeInBits) && (kSmiTag == 0));
+      STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) &&
+                    (kSmiTag == 0));
       mem_op = UntagSmiMemOperand(elements, offset);
     } else {
       mem_op = MemOperand(elements, offset);
@@ -3680,7 +3683,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
   if (access.representation().IsSmi() &&
       instr->hydrogen()->representation().IsInteger32()) {
     // Read int value directly from upper half of the smi.
-    STATIC_ASSERT((kSmiValueSize == kWRegSizeInBits) && (kSmiTag == 0));
+    STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
     __ Load(result, UntagSmiFieldMemOperand(source, offset),
             Representation::Integer32());
   } else {
@@ -5283,7 +5286,8 @@ void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
     if (representation.IsInteger32()) {
       ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
       ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
-      STATIC_ASSERT((kSmiValueSize == kWRegSizeInBits) && (kSmiTag == 0));
+      STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) &&
+                    (kSmiTag == 0));
       mem_op = UntagSmiMemOperand(store_base, offset);
     } else {
       mem_op = MemOperand(store_base, offset);
@@ -5402,7 +5406,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
       __ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
     }
 #endif
-    STATIC_ASSERT((kSmiValueSize == kWRegSizeInBits) && (kSmiTag == 0));
+    STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
     __ Store(value, UntagSmiFieldMemOperand(destination, offset),
              Representation::Integer32());
   } else {
diff --git a/src/arm64/macro-assembler-arm64-inl.h b/src/arm64/macro-assembler-arm64-inl.h
index b2aaea2..3ce4855 100644
--- a/src/arm64/macro-assembler-arm64-inl.h
+++ b/src/arm64/macro-assembler-arm64-inl.h
@@ -1308,7 +1308,6 @@ void MacroAssembler::InitializeRootRegister() {
 
 
 void MacroAssembler::SmiTag(Register dst, Register src) {
-  STATIC_ASSERT(kSmiValueSize + kSmiShift == kXRegSizeInBits);
   ASSERT(dst.Is64Bits() && src.Is64Bits());
   Lsl(dst, src, kSmiShift);
 }
@@ -1318,7 +1317,6 @@ void MacroAssembler::SmiTag(Register smi) { SmiTag(smi, smi); }
 
 
 void MacroAssembler::SmiUntag(Register dst, Register src) {
-  STATIC_ASSERT(kSmiValueSize + kSmiShift == kXRegSizeInBits);
   ASSERT(dst.Is64Bits() && src.Is64Bits());
   if (FLAG_enable_slow_asserts) {
     AssertSmi(src);
@@ -1353,17 +1351,13 @@ void MacroAssembler::SmiUntagToFloat(FPRegister dst,
 
 
 void MacroAssembler::SmiTagAndPush(Register src) {
-  STATIC_ASSERT((kSmiShift == kWRegSizeInBits) &&
-                (kSmiValueSize == kWRegSizeInBits) &&
-                (kSmiTag == 0));
+  STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
   Push(src.W(), wzr);
 }
 
 
 void MacroAssembler::SmiTagAndPush(Register src1, Register src2) {
-  STATIC_ASSERT((kSmiShift == kWRegSizeInBits) &&
-                (kSmiValueSize == kWRegSizeInBits) &&
-                (kSmiTag == 0));
+  STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
   Push(src1.W(), wzr, src2.W(), wzr);
 }
 
-- 
2.7.4
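All of the sequences this revert restores lean on the same arm64 Smi layout: kSmiTag == 0 and kSmiShift == 32, so a Smi is just its 32-bit payload placed in the upper half of a 64-bit word. The standalone C++ sketch below is illustrative only (the function names are invented here; only the constant values mirror V8's, and the memory form assumes a little-endian target, which is what V8's arm64 port targets). It shows why Operand(smi, ASR, kSmiShift) untags, why a 32-bit load at byte offset kSmiShift / kBitsPerByte == 4 reads the payload directly (the UntagSmiMemOperand / kContextOffset + 4 pattern above), and why a single LSR by kSmiShift - kPointerSizeLog2 untags and scales an index in one step.

// Standalone sketch (not V8 source): arm64 Smi tagging as assumed by the
// code above. Constant values mirror V8's; the function names are made up.
#include <cassert>
#include <cstdint>
#include <cstring>

namespace {

const int kSmiTag = 0;           // a Smi has all-zero low bits
const int kSmiShift = 32;        // payload lives in the upper 32 bits
const int kBitsPerByte = 8;
const int kPointerSizeLog2 = 3;  // 64-bit pointers

// MacroAssembler::SmiTag is Lsl(dst, src, kSmiShift): place the payload in
// the upper half. Shift in the unsigned domain to avoid UB for negatives.
int64_t SmiTag(int32_t value) {
  return static_cast<int64_t>(
      static_cast<uint64_t>(static_cast<uint32_t>(value)) << kSmiShift);
}

// Register untagging, the Operand(smi, ASR, kSmiShift) form: an arithmetic
// shift right restores the signed payload.
int64_t SmiUntag(int64_t smi) {
  return smi >> kSmiShift;
}

// Memory untagging, the UntagSmiMemOperand idea: on a little-endian target
// the payload of a Smi stored at p starts kSmiShift / kBitsPerByte == 4
// bytes in, so a 32-bit load at p + 4 reads it with no shift at all.
int32_t LoadSmiPayload(const int64_t* p) {
  int32_t payload;
  std::memcpy(&payload,
              reinterpret_cast<const char*>(p) + kSmiShift / kBitsPerByte,
              sizeof(payload));
  return payload;
}

}  // namespace

int main() {
  int64_t smi = SmiTag(-42);
  assert((smi & 0xffffffff) == kSmiTag);  // low 32 bits are the (zero) tag
  assert(SmiUntag(smi) == -42);           // ASR #32 recovers the value
  assert(LoadSmiPayload(&smi) == -42);    // 32-bit load at offset +4

  // Untag-and-scale in one shift, as in
  // Operand(code_, LSR, kSmiShift - kPointerSizeLog2): for a non-negative
  // Smi this yields value << kPointerSizeLog2, i.e. a pointer-sized index.
  int64_t index = SmiTag(7);
  assert(static_cast<uint64_t>(index) >> (kSmiShift - kPointerSizeLog2) ==
         (7u << kPointerSizeLog2));
  return 0;
}

The memory form is what lets the loads and stores in lithium-codegen-arm64.cc treat an Integer32 field as the upper word of a Smi slot without a separate untag instruction; the revert only swaps the named helpers back to the equivalent open-coded operands and restores the older assertions, the layout itself is unchanged.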