// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_ARM_CODEGEN_ARM_H_
#define V8_ARM_CODEGEN_ARM_H_

// Forward declarations
class CompilationInfo;
class RegisterAllocator;

enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };

// -------------------------------------------------------------------------
// Reference support

// A reference is a C++ stack-allocated object that puts a
// reference on the virtual frame.  The reference may be consumed
// by GetValue, TakeValue, SetValue, and CodeGenerator::UnloadReference.
// When the lifetime (scope) of a valid reference ends, it must have
// been consumed, and be in state UNLOADED.
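//
// Illustrative sketch (not from this file) of how such a reference is
// typically used inside the code generator, assuming a simple store:
//
//   { Reference ref(this, node->target());
//     ref.GetValue();               // push the value, consuming the ref
//     ...
//     // or: ref.SetValue(NOT_CONST_INIT);  // store and unload it
//   }                               // ref must be UNLOADED by now
//
// Here node->target() merely stands for whatever expression is accessed.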
class Reference BASE_EMBEDDED {
  // The values of the types are important, see size().
  enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };

  Reference(CodeGenerator* cgen,
            Expression* expression,
            bool persist_after_get = false);

  Expression* expression() const { return expression_; }
  Type type() const { return type_; }
  void set_type(Type value) {
    ASSERT_EQ(ILLEGAL, type_);
    type_ = value;
  }

  void set_unloaded() {
    ASSERT_NE(ILLEGAL, type_);
    ASSERT_NE(UNLOADED, type_);
    type_ = UNLOADED;
  }

  // The size the reference takes up on the stack.
  int size() const {
    return (type_ < SLOT) ? 0 : type_;
  }
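  // For example, a SLOT reference occupies no stack slots, a NAMED reference
  // one (the receiver), and a KEYED reference two (receiver and key);
  // ILLEGAL and UNLOADED references occupy none.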

  bool is_illegal() const { return type_ == ILLEGAL; }
  bool is_slot() const { return type_ == SLOT; }
  bool is_property() const { return type_ == NAMED || type_ == KEYED; }
  bool is_unloaded() const { return type_ == UNLOADED; }

  // Return the name.  Only valid for named property references.
  Handle<String> GetName();

  // Generate code to push the value of the reference on top of the
  // expression stack.  The reference is expected to be already on top of
  // the expression stack, and it is consumed by the call unless the
  // reference is for a compound assignment.
  // If the reference is not consumed, it is left in place under its value.
  void GetValue();

  // Generate code to store the value on top of the expression stack in the
  // reference.  The reference is expected to be immediately below the value
  // on the expression stack.  The value is stored in the location specified
  // by the reference, and is left on top of the stack, after the reference
  // is popped from beneath it (unloaded).
  void SetValue(InitState init_state);

  CodeGenerator* cgen_;
  Expression* expression_;
  // Keep the reference on the stack after get, so it can be used by set later.
  bool persist_after_get_;


// -------------------------------------------------------------------------
// Code generation state

// The state is passed down the AST by the code generator (and back up, in
// the form of the state of the label pair).  It is threaded through the
// call stack.  Constructing a state implicitly pushes it on the owning code
// generator's stack of states, and destroying one implicitly pops it.

class CodeGenState BASE_EMBEDDED {
  // Create an initial code generator state.  Destroying the initial state
  // leaves the code generator with a NULL state.
  explicit CodeGenState(CodeGenerator* owner);

  // Create a code generator state based on a code generator's current
  // state.  The new state has its own pair of branch labels.
  CodeGenState(CodeGenerator* owner,
               JumpTarget* true_target,
               JumpTarget* false_target);

  // Destroy a code generator state and restore the owning code generator's
  // previous state.

  JumpTarget* true_target() const { return true_target_; }
  JumpTarget* false_target() const { return false_target_; }

  CodeGenerator* owner_;
  JumpTarget* true_target_;
  JumpTarget* false_target_;
  CodeGenState* previous_;
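  // Illustrative sketch (an assumption about typical use, not taken from
  // this file): a visitor compiling a boolean-valued subexpression might do
  //
  //   JumpTarget then, otherwise;
  //   { CodeGenState new_state(this, &then, &otherwise);
  //     Visit(node->condition());   // sees the new targets via state()
  //   }                             // previous state restored here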


// -------------------------------------------------------------------------
// Arguments allocation mode
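// (Roughly: NO_ARGUMENTS_ALLOCATION means no arguments object is needed,
// EAGER_ARGUMENTS_ALLOCATION allocates it on function entry, and
// LAZY_ARGUMENTS_ALLOCATION defers allocation until it is actually needed.
// This gloss is an interpretation of the enum names, not taken from this
// file.)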

enum ArgumentsAllocationMode {
  NO_ARGUMENTS_ALLOCATION,
  EAGER_ARGUMENTS_ALLOCATION,
  LAZY_ARGUMENTS_ALLOCATION
};


// Different nop operations are used by the code generator to detect certain
// states of the generated code.
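// (For example, a marker nop can be emitted right after an inlined
// named-property load so that later patching code can recognize the inlined
// site.  This note is an inference from the enum below, not a statement from
// this file.)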
enum NopMarkerTypes {
  PROPERTY_LOAD_INLINED
};


// -------------------------------------------------------------------------
// CodeGenerator

class CodeGenerator: public AstVisitor {
  // Takes a function literal and generates code for it.  This function
  // should only be called by compiler.cc.
  static Handle<Code> MakeCode(CompilationInfo* info);

  // Printing of AST, etc. as requested by flags.
  static void MakeCodePrologue(CompilationInfo* info);

  // Allocate and install the code.
  static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
                                       CompilationInfo* info);

#ifdef ENABLE_LOGGING_AND_PROFILING
  static bool ShouldGenerateLog(Expression* type);
#endif

  static void SetFunctionInfo(Handle<JSFunction> fun,
                              FunctionLiteral* lit,
                              Handle<Script> script);

  static void RecordPositions(MacroAssembler* masm, int pos);

  MacroAssembler* masm() { return masm_; }
  VirtualFrame* frame() const { return frame_; }
  inline Handle<Script> script();

  bool has_valid_frame() const { return frame_ != NULL; }

  // Set the virtual frame to be new_frame, with non-frame register
  // reference counts given by non_frame_registers.  The non-frame
  // register reference counts of the old frame are returned in
  // non_frame_registers.
  void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);

  RegisterAllocator* allocator() const { return allocator_; }

  CodeGenState* state() { return state_; }
  void set_state(CodeGenState* state) { state_ = state; }

  void AddDeferred(DeferredCode* code) { deferred_.Add(code); }

  static const int kUnknownIntValue = -1;

  // If the name is an inline runtime function call, return the number of
  // expected arguments.  Otherwise return -1.
  static int InlineRuntimeCallArgumentsCount(Handle<String> name);

  // Construction/Destruction
  explicit CodeGenerator(MacroAssembler* masm);

  inline bool is_eval();
  inline Scope* scope();

  // Generating deferred code.
  void ProcessDeferred();

  bool has_cc() const { return cc_reg_ != al; }
  JumpTarget* true_target() const { return state_->true_target(); }
  JumpTarget* false_target() const { return state_->false_target(); }

  // Track loop nesting level.
  int loop_nesting() const { return loop_nesting_; }
  void IncrementLoopNesting() { loop_nesting_++; }
  void DecrementLoopNesting() { loop_nesting_--; }

  void VisitStatements(ZoneList<Statement*>* statements);

#define DEF_VISIT(type) \
  void Visit##type(type* node);
  AST_NODE_LIST(DEF_VISIT)

  // Visit a statement and then spill the virtual frame if control flow can
  // reach the end of the statement (i.e., it does not exit via break,
  // continue, return, or throw).  This function is used temporarily while
  // the code generator is being transformed.
  inline void VisitAndSpill(Statement* statement);

  // Visit a list of statements and then spill the virtual frame if control
  // flow can reach the end of the list.
  inline void VisitStatementsAndSpill(ZoneList<Statement*>* statements);

  // Main code generation function
  void Generate(CompilationInfo* info);

  // Returns the arguments allocation mode.
  ArgumentsAllocationMode ArgumentsMode();

  // Store the arguments object and allocate it if necessary.
  void StoreArgumentsObject(bool initial);

  // The following are used by class Reference.
  void LoadReference(Reference* ref);
  void UnloadReference(Reference* ref);

  static MemOperand ContextOperand(Register context, int index) {
    return MemOperand(context, Context::SlotOffset(index));
  }

  MemOperand SlotOperand(Slot* slot, Register tmp);

  MemOperand ContextSlotOperandCheckExtensions(Slot* slot,

  static MemOperand GlobalObject() {
    return ContextOperand(cp, Context::GLOBAL_INDEX);
  }

  void LoadCondition(Expression* x,
                     JumpTarget* true_target,
                     JumpTarget* false_target,

  void Load(Expression* expr);
  void LoadGlobalReceiver(Register scratch);

  // Generate code to push the value of an expression on top of the frame
  // and then spill the frame fully to memory.  This function is used
  // temporarily while the code generator is being transformed.
  inline void LoadAndSpill(Expression* expression);

  // Call LoadCondition and then spill the virtual frame unless control flow
  // cannot reach the end of the expression (i.e., by emitting only
  // unconditional jumps to the control targets).
  inline void LoadConditionAndSpill(Expression* expression,
                                    JumpTarget* true_target,
                                    JumpTarget* false_target,

  // Read a value from a slot and leave it on top of the expression stack.
  void LoadFromSlot(Slot* slot, TypeofState typeof_state);
  void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);

  // Store the value on top of the stack to a slot.
  void StoreToSlot(Slot* slot, InitState init_state);

  // Load a named property, leaving it in r0.  The receiver is passed on the
  // stack, and remains there.
  void EmitNamedLoad(Handle<String> name, bool is_contextual);

  // Load a keyed property, leaving it in r0.  The receiver and key are
  // passed on the stack, and remain there.
  void EmitKeyedLoad();

  void LoadFromGlobalSlotCheckExtensions(Slot* slot,
                                         TypeofState typeof_state,

  // Special code for typeof expressions: we must be careful when loading the
  // operand of a 'typeof' expression.  We are not allowed to throw reference
  // errors for non-existing properties of the global object, so we must make
  // the load look like an explicit property access, instead of an access
  // through the context chain.
  void LoadTypeofExpression(Expression* x);
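  // (For example, 'typeof some_undeclared_name' must evaluate to "undefined"
  // rather than throw a ReferenceError, which is why the load is made to
  // look like a global property access.)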

  void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);

  // Generate code that computes a shortcutting logical operation.
  void GenerateLogicalBooleanOperation(BinaryOperation* node);

  void GenericBinaryOperation(Token::Value op,
                              OverwriteMode overwrite_mode,
                              int known_rhs = kUnknownIntValue);
  void VirtualFrameBinaryOperation(Token::Value op,
                                   OverwriteMode overwrite_mode,
                                   int known_rhs = kUnknownIntValue);

  void Comparison(Condition cc,
                  bool strict = false);

  void SmiOperation(Token::Value op,
                    Handle<Object> value,

  void VirtualFrameSmiOperation(Token::Value op,
                                Handle<Object> value,

  void CallWithArguments(ZoneList<Expression*>* arguments,
                         CallFunctionFlags flags,

  // An optimized implementation of expressions of the form
  // x.apply(y, arguments).  We call x the applicand and y the receiver.
  // The optimization avoids allocating an arguments object if possible.
  void CallApplyLazy(Expression* applicand,
                     Expression* receiver,
                     VariableProxy* arguments,

  void Branch(bool if_true, JumpTarget* target);

  struct InlineRuntimeLUT {
    void (CodeGenerator::*method)(ZoneList<Expression*>*);

  static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
  bool CheckForInlineRuntimeCall(CallRuntime* node);
  static bool PatchInlineRuntimeEntry(Handle<String> name,
                                      const InlineRuntimeLUT& new_entry,
                                      InlineRuntimeLUT* old_entry);

  static Handle<Code> ComputeLazyCompile(int argc);
  void ProcessDeclarations(ZoneList<Declaration*>* declarations);

  static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);

  // Declare global variables and functions in the given array of
  // name/value pairs.
  void DeclareGlobals(Handle<FixedArray> pairs);

  // Instantiate the function based on the shared function info.
  void InstantiateFunction(Handle<SharedFunctionInfo> function_info);

  // Support for type checks.
  void GenerateIsSmi(ZoneList<Expression*>* args);
  void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
  void GenerateIsArray(ZoneList<Expression*>* args);
  void GenerateIsRegExp(ZoneList<Expression*>* args);
  void GenerateIsObject(ZoneList<Expression*>* args);
  void GenerateIsFunction(ZoneList<Expression*>* args);
  void GenerateIsUndetectableObject(ZoneList<Expression*>* args);

  // Support for construct call checks.
  void GenerateIsConstructCall(ZoneList<Expression*>* args);

  // Support for arguments.length and arguments[?].
  void GenerateArgumentsLength(ZoneList<Expression*>* args);
  void GenerateArguments(ZoneList<Expression*>* args);

  // Support for accessing the class and value fields of an object.
  void GenerateClassOf(ZoneList<Expression*>* args);
  void GenerateValueOf(ZoneList<Expression*>* args);
  void GenerateSetValueOf(ZoneList<Expression*>* args);

  // Fast support for charCodeAt(n).
  void GenerateFastCharCodeAt(ZoneList<Expression*>* args);

  // Fast support for string.charAt(n) and string[n].
  void GenerateCharFromCode(ZoneList<Expression*>* args);

  // Fast support for object equality testing.
  void GenerateObjectEquals(ZoneList<Expression*>* args);

  void GenerateLog(ZoneList<Expression*>* args);

  // Fast support for Math.random().
  void GenerateRandomHeapNumber(ZoneList<Expression*>* args);

  // Fast support for StringAdd.
  void GenerateStringAdd(ZoneList<Expression*>* args);

  // Fast support for SubString.
  void GenerateSubString(ZoneList<Expression*>* args);

  // Fast support for StringCompare.
  void GenerateStringCompare(ZoneList<Expression*>* args);

  // Support for direct calls from JavaScript to native RegExp code.
  void GenerateRegExpExec(ZoneList<Expression*>* args);

  void GenerateRegExpConstructResult(ZoneList<Expression*>* args);

  // Support for fast native caches.
  void GenerateGetFromCache(ZoneList<Expression*>* args);

  // Fast support for number to string.
  void GenerateNumberToString(ZoneList<Expression*>* args);

  // Fast call for custom callbacks.
  void GenerateCallFunction(ZoneList<Expression*>* args);

  // Fast call to math functions.
  void GenerateMathPow(ZoneList<Expression*>* args);
  void GenerateMathSin(ZoneList<Expression*>* args);
  void GenerateMathCos(ZoneList<Expression*>* args);
  void GenerateMathSqrt(ZoneList<Expression*>* args);

  // Simple condition analysis.
  enum ConditionAnalysis {
  ConditionAnalysis AnalyzeCondition(Expression* cond);

  // Methods used to indicate which source code the generated code
  // corresponds to.  Source positions are collected by the assembler and
  // emitted with the relocation information.
  void CodeForFunctionPosition(FunctionLiteral* fun);
  void CodeForReturnPosition(FunctionLiteral* fun);
  void CodeForStatementPosition(Statement* node);
  void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
  void CodeForSourcePosition(int pos);

  // True if the registers are valid for entry to a block.
  bool HasValidEntryRegisters();

  List<DeferredCode*> deferred_;

  MacroAssembler* masm_;  // to generate code
  CompilationInfo* info_;

  // Code generation state
  VirtualFrame* frame_;
  RegisterAllocator* allocator_;
  CodeGenState* state_;

  BreakTarget function_return_;

  // True if the function return is shadowed (i.e., jumping to the target
  // function_return_ does not jump to the true function return, but rather
  // to some unlinking code).
  bool function_return_is_shadowed_;

  static InlineRuntimeLUT kInlineRuntimeLUT[];

  friend class VirtualFrame;
  friend class JumpTarget;
  friend class Reference;
  friend class FastCodeGenerator;
  friend class FullCodeGenerator;
  friend class FullCodeGenSyntaxChecker;

  DISALLOW_COPY_AND_ASSIGN(CodeGenerator);


class GenericBinaryOpStub : public CodeStub {
  GenericBinaryOpStub(Token::Value op,
                      int constant_rhs = CodeGenerator::kUnknownIntValue)
        constant_rhs_(constant_rhs),
        specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
        runtime_operands_type_(BinaryOpIC::DEFAULT),

  GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
      : op_(OpBits::decode(key)),
        mode_(ModeBits::decode(key)),
        lhs_(LhsRegister(RegisterBits::decode(key))),
        rhs_(RhsRegister(RegisterBits::decode(key))),
        constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
        specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
        runtime_operands_type_(type_info),

  bool specialized_on_rhs_;
  BinaryOpIC::TypeInfo runtime_operands_type_;

  static const int kMaxKnownRhs = 0x40000000;
  static const int kKnownRhsKeyBits = 6;

  // Minor key encoding in 17 bits.
  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
  class OpBits: public BitField<Token::Value, 2, 6> {};
  class TypeInfoBits: public BitField<int, 8, 2> {};
  class RegisterBits: public BitField<bool, 10, 1> {};
  class KnownIntBits: public BitField<int, 11, kKnownRhsKeyBits> {};

  Major MajorKey() { return GenericBinaryOp; }
  int MinorKey() {
    ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
           (lhs_.is(r1) && rhs_.is(r0)));
    // Encode the parameters in a unique 17 bit value.
    return OpBits::encode(op_)
           | ModeBits::encode(mode_)
           | KnownIntBits::encode(MinorKeyForKnownInt())
           | TypeInfoBits::encode(runtime_operands_type_)
           | RegisterBits::encode(lhs_.is(r0));
  }
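  // For reference, the resulting layout is (low bit on the right):
  //
  //   bits:  16....11      10        9..8      7..2    1..0
  //          known int  lhs-is-r0  type info    op     mode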

  void Generate(MacroAssembler* masm);
  void HandleNonSmiBitwiseOp(MacroAssembler* masm, Register lhs, Register rhs);
  void HandleBinaryOpSlowCases(MacroAssembler* masm,
                               const Builtins::JavaScript& builtin);
  void GenerateTypeTransition(MacroAssembler* masm);

  static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
    if (constant_rhs == CodeGenerator::kUnknownIntValue) return false;
    if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
    if (op == Token::MOD) {
      if (constant_rhs <= 1) return false;
      if (constant_rhs <= 10) return true;
      if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
      return false;
    }
    return false;
  }
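  // (Concretely: division is specialized only for a constant divisor of 2 or
  //  3, and modulus for 2..10 or any power of two up to kMaxKnownRhs.)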

  int MinorKeyForKnownInt() {
    if (!specialized_on_rhs_) return 0;
    if (constant_rhs_ <= 10) return constant_rhs_ + 1;
    ASSERT(IsPowerOf2(constant_rhs_));
    int key = 12;
    int d = constant_rhs_;
    while ((d & 1) == 0) {
      key++;
      d >>= 1;
    }
    ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits));
    return key;
  }

  int KnownBitsForMinorKey(int key) {
    if (key <= 11) return key - 1;
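  // (Worked example: a known right-hand side of 7 is encoded as key 7 + 1 = 8
  //  by MinorKeyForKnownInt and decoded here as 8 - 1 = 7; power-of-two
  //  constants use the remaining keys, one per exponent.)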

  Register LhsRegister(bool lhs_is_r0) {
    return lhs_is_r0 ? r0 : r1;
  }

  Register RhsRegister(bool lhs_is_r0) {
    return lhs_is_r0 ? r1 : r0;
  }

  bool ShouldGenerateSmiCode() {
    return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
           runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
           runtime_operands_type_ != BinaryOpIC::STRINGS;
  }

  bool ShouldGenerateFPCode() {
    return runtime_operands_type_ != BinaryOpIC::STRINGS;
  }

  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }

  virtual InlineCacheState GetICState() {
    return BinaryOpIC::ToState(runtime_operands_type_);
  }

  const char* GetName();

  void Print() {
    if (!specialized_on_rhs_) {
      PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
    } else {
      PrintF("GenericBinaryOpStub (%s by %d)\n",

class StringStubBase: public CodeStub {
  // Generate code for copying characters using a simple loop.  This should
  // only be used in places where the number of characters is small and the
  // additional setup and checking in GenerateCopyCharactersLong adds too much
  // overhead.  Copying of overlapping regions is not supported.
  // Dest register ends at the position after the last character written.
  void GenerateCopyCharacters(MacroAssembler* masm,

  // Generate code for copying a large number of characters.  This function
  // is allowed to spend extra time setting up conditions to make copying
  // faster.  Copying of overlapping regions is not supported.
  // Dest register ends at the position after the last character written.
  void GenerateCopyCharactersLong(MacroAssembler* masm,

  // Probe the symbol table for a two character string.  If the string is not
  // found by probing, a jump to the label not_found is performed.  This jump
  // does not guarantee that the string is not in the symbol table.  If the
  // string is found, the code falls through with the string in register r0.
  // Contents of both c1 and c2 registers are modified.  On exit c1 is
  // guaranteed to contain a halfword with its low and high bytes equal to the
  // initial contents of c1 and c2 respectively.
  void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,

  // Generate string hash.
  void GenerateHashInit(MacroAssembler* masm,

  void GenerateHashAddCharacter(MacroAssembler* masm,

  void GenerateHashGetHash(MacroAssembler* masm,


// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
  NO_STRING_ADD_FLAGS = 0,
  NO_STRING_CHECK_IN_STUB = 1 << 0  // Omit string check in stub.
};

class StringAddStub: public StringStubBase {
  explicit StringAddStub(StringAddFlags flags) {
    string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
  }

  Major MajorKey() { return StringAdd; }
  int MinorKey() { return string_check_ ? 0 : 1; }

  void Generate(MacroAssembler* masm);

  // Should the stub check whether arguments are strings?
  bool string_check_;


class SubStringStub: public StringStubBase {
  Major MajorKey() { return SubString; }
  int MinorKey() { return 0; }

  void Generate(MacroAssembler* masm);

class StringCompareStub: public CodeStub {
  StringCompareStub() { }

  // Compares two flat ASCII strings and returns the result in r0.
  // Does not use the stack.
  static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,

  Major MajorKey() { return StringCompare; }
  int MinorKey() { return 0; }

  void Generate(MacroAssembler* masm);

// This stub can convert a signed int32 to a heap number (double).  It does
// not work for int32s that are in Smi range!  No GC occurs during this stub
// so you don't have to set up the frame.
class WriteInt32ToHeapNumberStub : public CodeStub {
  WriteInt32ToHeapNumberStub(Register the_int,
                             Register the_heap_number,
                             Register scratch)
      : the_int_(the_int),
        the_heap_number_(the_heap_number),
        scratch_(scratch) { }

  Register the_heap_number_;

  // Minor key encoding in 16 bits.
  class IntRegisterBits: public BitField<int, 0, 4> {};
  class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
  class ScratchRegisterBits: public BitField<int, 8, 4> {};

  Major MajorKey() { return WriteInt32ToHeapNumber; }
  int MinorKey() {
    // Encode the parameters in a unique 16 bit value.
    return IntRegisterBits::encode(the_int_.code())
           | HeapNumberRegisterBits::encode(the_heap_number_.code())
           | ScratchRegisterBits::encode(scratch_.code());
  }
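  // (Worked example: with the_int_ = r1, the_heap_number_ = r0 and
  //  scratch_ = r2 the key is 1 | (0 << 4) | (2 << 8) = 0x201.)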

  void Generate(MacroAssembler* masm);

  const char* GetName() { return "WriteInt32ToHeapNumberStub"; }

  void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }


class NumberToStringStub: public CodeStub {
  NumberToStringStub() { }

  // Generate code to do a lookup in the number string cache.  If the number
  // in the register object is found in the cache, the generated code falls
  // through with the result in the result register.  The object and the
  // result register can be the same.  If the number is not found in the
  // cache, the code jumps to the label not_found with only the content of
  // register object unchanged.
  static void GenerateLookupNumberStringCache(MacroAssembler* masm,

  Major MajorKey() { return NumberToString; }
  int MinorKey() { return 0; }

  void Generate(MacroAssembler* masm);

  const char* GetName() { return "NumberToStringStub"; }

  void Print() {
    PrintF("NumberToStringStub\n");
  }


} }  // namespace v8::internal

#endif  // V8_ARM_CODEGEN_ARM_H_