1 // Copyright 2010 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #ifndef V8_ARM_CODEGEN_ARM_H_
29 #define V8_ARM_CODEGEN_ARM_H_
37 // Forward declarations
38 class CompilationInfo;
41 class RegisterAllocator;
44 enum InitState { CONST_INIT, NOT_CONST_INIT };
45 enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
46 enum GenerateInlineSmi { DONT_GENERATE_INLINE_SMI, GENERATE_INLINE_SMI };
49 // -------------------------------------------------------------------------
52 // A reference is a C++ stack-allocated object that puts a
53 // reference on the virtual frame. The reference may be consumed
54 // by GetValue, TakeValue, SetValue, and CodeGenerator::UnloadReference.
55 // When the lifetime (scope) of a valid reference ends, it must have
56 // been consumed, and be in state UNLOADED.
57 class Reference BASE_EMBEDDED {
59 // The values of the types is important, see size().
60 enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
61 Reference(CodeGenerator* cgen,
62 Expression* expression,
63 bool persist_after_get = false);
66 Expression* expression() const { return expression_; }
67 Type type() const { return type_; }
68 void set_type(Type value) {
69 ASSERT_EQ(ILLEGAL, type_);
74 ASSERT_NE(ILLEGAL, type_);
75 ASSERT_NE(UNLOADED, type_);
78 // The size the reference takes up on the stack.
80 return (type_ < SLOT) ? 0 : type_;
83 bool is_illegal() const { return type_ == ILLEGAL; }
84 bool is_slot() const { return type_ == SLOT; }
85 bool is_property() const { return type_ == NAMED || type_ == KEYED; }
86 bool is_unloaded() const { return type_ == UNLOADED; }
88 // Return the name. Only valid for named property references.
89 Handle<String> GetName();
91 // Generate code to push the value of the reference on top of the
92 // expression stack. The reference is expected to be already on top of
93 // the expression stack, and it is consumed by the call unless the
94 // reference is for a compound assignment.
95 // If the reference is not consumed, it is left in place under its value.
98 // Generate code to store the value on top of the expression stack in the
99 // reference. The reference is expected to be immediately below the value
100 // on the expression stack. The value is stored in the location specified
101 // by the reference, and is left on top of the stack, after the reference
102 // is popped from beneath it (unloaded).
103 void SetValue(InitState init_state);
105 // This is in preparation for something that uses the reference on the stack.
106 // If we need this reference afterwards get then dup it now. Otherwise mark
108 inline void DupIfPersist();
111 CodeGenerator* cgen_;
112 Expression* expression_;
114 // Keep the reference on the stack after get, so it can be used by set later.
115 bool persist_after_get_;
119 // -------------------------------------------------------------------------
120 // Code generation state
122 // The state is passed down the AST by the code generator (and back up, in
123 // the form of the state of the label pair). It is threaded through the
124 // call stack. Constructing a state implicitly pushes it on the owning code
125 // generator's stack of states, and destroying one implicitly pops it.
127 class CodeGenState BASE_EMBEDDED {
129 // Create an initial code generator state. Destroying the initial state
130 // leaves the code generator with a NULL state.
131 explicit CodeGenState(CodeGenerator* owner);
133 // Destroy a code generator state and restore the owning code generator's
135 virtual ~CodeGenState();
137 virtual JumpTarget* true_target() const { return NULL; }
138 virtual JumpTarget* false_target() const { return NULL; }
141 inline CodeGenerator* owner() { return owner_; }
142 inline CodeGenState* previous() const { return previous_; }
145 CodeGenerator* owner_;
146 CodeGenState* previous_;
150 class ConditionCodeGenState : public CodeGenState {
152 // Create a code generator state based on a code generator's current
153 // state. The new state has its own pair of branch labels.
154 ConditionCodeGenState(CodeGenerator* owner,
155 JumpTarget* true_target,
156 JumpTarget* false_target);
158 virtual JumpTarget* true_target() const { return true_target_; }
159 virtual JumpTarget* false_target() const { return false_target_; }
162 JumpTarget* true_target_;
163 JumpTarget* false_target_;
167 class TypeInfoCodeGenState : public CodeGenState {
169 TypeInfoCodeGenState(CodeGenerator* owner,
172 ~TypeInfoCodeGenState();
174 virtual JumpTarget* true_target() const { return previous()->true_target(); }
175 virtual JumpTarget* false_target() const {
176 return previous()->false_target();
181 TypeInfo old_type_info_;
185 // -------------------------------------------------------------------------
186 // Arguments allocation mode
188 enum ArgumentsAllocationMode {
189 NO_ARGUMENTS_ALLOCATION,
190 EAGER_ARGUMENTS_ALLOCATION,
191 LAZY_ARGUMENTS_ALLOCATION
195 // Different nop operations are used by the code generator to detect certain
196 // states of the generated code.
197 enum NopMarkerTypes {
199 PROPERTY_ACCESS_INLINED
203 // -------------------------------------------------------------------------
206 class CodeGenerator: public AstVisitor {
208 // Takes a function literal, generates code for it. This function should only
209 // be called by compiler.cc.
210 static Handle<Code> MakeCode(CompilationInfo* info);
212 // Printing of AST, etc. as requested by flags.
213 static void MakeCodePrologue(CompilationInfo* info);
215 // Allocate and install the code.
216 static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
218 CompilationInfo* info);
220 #ifdef ENABLE_LOGGING_AND_PROFILING
221 static bool ShouldGenerateLog(Expression* type);
224 static void SetFunctionInfo(Handle<JSFunction> fun,
225 FunctionLiteral* lit,
227 Handle<Script> script);
229 static bool RecordPositions(MacroAssembler* masm,
231 bool right_here = false);
234 MacroAssembler* masm() { return masm_; }
235 VirtualFrame* frame() const { return frame_; }
236 inline Handle<Script> script();
238 bool has_valid_frame() const { return frame_ != NULL; }
240 // Set the virtual frame to be new_frame, with non-frame register
241 // reference counts given by non_frame_registers. The non-frame
242 // register reference counts of the old frame are returned in
243 // non_frame_registers.
244 void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
248 RegisterAllocator* allocator() const { return allocator_; }
250 CodeGenState* state() { return state_; }
251 void set_state(CodeGenState* state) { state_ = state; }
253 TypeInfo type_info(Slot* slot) {
254 int index = NumberOfSlot(slot);
255 if (index == kInvalidSlotNumber) return TypeInfo::Unknown();
256 return (*type_info_)[index];
259 TypeInfo set_type_info(Slot* slot, TypeInfo info) {
260 int index = NumberOfSlot(slot);
261 ASSERT(index >= kInvalidSlotNumber);
262 if (index != kInvalidSlotNumber) {
263 TypeInfo previous_value = (*type_info_)[index];
264 (*type_info_)[index] = info;
265 return previous_value;
267 return TypeInfo::Unknown();
270 void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
272 static const int kUnknownIntValue = -1;
274 // If the name is an inline runtime function call return the number of
275 // expected arguments. Otherwise return -1.
276 static int InlineRuntimeCallArgumentsCount(Handle<String> name);
278 // Constants related to patching of inlined load/store.
279 static const int kInlinedKeyedLoadInstructionsAfterPatch = 17;
280 static const int kInlinedKeyedStoreInstructionsAfterPatch = 5;
283 // Construction/Destruction
284 explicit CodeGenerator(MacroAssembler* masm);
287 inline bool is_eval();
288 inline Scope* scope();
290 // Generating deferred code.
291 void ProcessDeferred();
293 static const int kInvalidSlotNumber = -1;
295 int NumberOfSlot(Slot* slot);
298 bool has_cc() const { return cc_reg_ != al; }
299 JumpTarget* true_target() const { return state_->true_target(); }
300 JumpTarget* false_target() const { return state_->false_target(); }
302 // Track loop nesting level.
303 int loop_nesting() const { return loop_nesting_; }
304 void IncrementLoopNesting() { loop_nesting_++; }
305 void DecrementLoopNesting() { loop_nesting_--; }
308 void VisitStatements(ZoneList<Statement*>* statements);
310 #define DEF_VISIT(type) \
311 void Visit##type(type* node);
312 AST_NODE_LIST(DEF_VISIT)
315 // Main code generation function
316 void Generate(CompilationInfo* info);
318 // Generate the return sequence code. Should be called no more than
319 // once per compiled function, immediately after binding the return
320 // target (which can not be done more than once). The return value should
322 void GenerateReturnSequence();
324 // Returns the arguments allocation mode.
325 ArgumentsAllocationMode ArgumentsMode();
327 // Store the arguments object and allocate it if necessary.
328 void StoreArgumentsObject(bool initial);
330 // The following are used by class Reference.
331 void LoadReference(Reference* ref);
332 void UnloadReference(Reference* ref);
334 static MemOperand ContextOperand(Register context, int index) {
335 return MemOperand(context, Context::SlotOffset(index));
338 MemOperand SlotOperand(Slot* slot, Register tmp);
340 MemOperand ContextSlotOperandCheckExtensions(Slot* slot,
346 static MemOperand GlobalObject() {
347 return ContextOperand(cp, Context::GLOBAL_INDEX);
350 void LoadCondition(Expression* x,
351 JumpTarget* true_target,
352 JumpTarget* false_target,
354 void Load(Expression* expr);
356 void LoadGlobalReceiver(Register scratch);
358 // Read a value from a slot and leave it on top of the expression stack.
359 void LoadFromSlot(Slot* slot, TypeofState typeof_state);
360 void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
362 // Store the value on top of the stack to a slot.
363 void StoreToSlot(Slot* slot, InitState init_state);
365 // Support for compiling assignment expressions.
366 void EmitSlotAssignment(Assignment* node);
367 void EmitNamedPropertyAssignment(Assignment* node);
368 void EmitKeyedPropertyAssignment(Assignment* node);
370 // Load a named property, returning it in r0. The receiver is passed on the
371 // stack, and remains there.
372 void EmitNamedLoad(Handle<String> name, bool is_contextual);
374 // Store to a named property. If the store is contextual, value is passed on
375 // the frame and consumed. Otherwise, receiver and value are passed on the
376 // frame and consumed. The result is returned in r0.
377 void EmitNamedStore(Handle<String> name, bool is_contextual);
379 // Load a keyed property, leaving it in r0. The receiver and key are
380 // passed on the stack, and remain there.
381 void EmitKeyedLoad();
383 // Store a keyed property. Key and receiver are on the stack and the value is
384 // in r0. Result is returned in r0.
385 void EmitKeyedStore(StaticType* key_type);
387 void LoadFromGlobalSlotCheckExtensions(Slot* slot,
388 TypeofState typeof_state,
391 // Support for loading from local/global variables and arguments
392 // whose location is known unless they are shadowed by
393 // eval-introduced bindings. Generates no code for unsupported slot
394 // types and therefore expects to fall through to the slow jump target.
395 void EmitDynamicLoadFromSlotFastCase(Slot* slot,
396 TypeofState typeof_state,
400 // Special code for typeof expressions: Unfortunately, we must
401 // be careful when loading the expression in 'typeof'
402 // expressions. We are not allowed to throw reference errors for
403 // non-existing properties of the global object, so we must make it
404 // look like an explicit property access, instead of an access
405 // through the context chain.
406 void LoadTypeofExpression(Expression* x);
408 void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
410 // Generate code that computes a shortcutting logical operation.
411 void GenerateLogicalBooleanOperation(BinaryOperation* node);
413 void GenericBinaryOperation(Token::Value op,
414 OverwriteMode overwrite_mode,
415 GenerateInlineSmi inline_smi,
416 int known_rhs = kUnknownIntValue);
417 void Comparison(Condition cc,
420 bool strict = false);
422 void SmiOperation(Token::Value op,
423 Handle<Object> value,
427 void CallWithArguments(ZoneList<Expression*>* arguments,
428 CallFunctionFlags flags,
431 // An optimized implementation of expressions of the form
432 // x.apply(y, arguments). We call x the applicand and y the receiver.
433 // The optimization avoids allocating an arguments object if possible.
434 void CallApplyLazy(Expression* applicand,
435 Expression* receiver,
436 VariableProxy* arguments,
440 void Branch(bool if_true, JumpTarget* target);
443 struct InlineRuntimeLUT {
444 void (CodeGenerator::*method)(ZoneList<Expression*>*);
449 static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
450 bool CheckForInlineRuntimeCall(CallRuntime* node);
451 static bool PatchInlineRuntimeEntry(Handle<String> name,
452 const InlineRuntimeLUT& new_entry,
453 InlineRuntimeLUT* old_entry);
455 static Handle<Code> ComputeLazyCompile(int argc);
456 void ProcessDeclarations(ZoneList<Declaration*>* declarations);
458 static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
460 static Handle<Code> ComputeKeyedCallInitialize(int argc, InLoopFlag in_loop);
462 // Declare global variables and functions in the given array of
464 void DeclareGlobals(Handle<FixedArray> pairs);
466 // Instantiate the function based on the shared function info.
467 void InstantiateFunction(Handle<SharedFunctionInfo> function_info);
469 // Support for type checks.
470 void GenerateIsSmi(ZoneList<Expression*>* args);
471 void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
472 void GenerateIsArray(ZoneList<Expression*>* args);
473 void GenerateIsRegExp(ZoneList<Expression*>* args);
474 void GenerateIsObject(ZoneList<Expression*>* args);
475 void GenerateIsFunction(ZoneList<Expression*>* args);
476 void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
478 // Support for construct call checks.
479 void GenerateIsConstructCall(ZoneList<Expression*>* args);
481 // Support for arguments.length and arguments[?].
482 void GenerateArgumentsLength(ZoneList<Expression*>* args);
483 void GenerateArguments(ZoneList<Expression*>* args);
485 // Support for accessing the class and value fields of an object.
486 void GenerateClassOf(ZoneList<Expression*>* args);
487 void GenerateValueOf(ZoneList<Expression*>* args);
488 void GenerateSetValueOf(ZoneList<Expression*>* args);
490 // Fast support for charCodeAt(n).
491 void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
493 // Fast support for string.charAt(n) and string[n].
494 void GenerateStringCharFromCode(ZoneList<Expression*>* args);
496 // Fast support for string.charAt(n) and string[n].
497 void GenerateStringCharAt(ZoneList<Expression*>* args);
499 // Fast support for object equality testing.
500 void GenerateObjectEquals(ZoneList<Expression*>* args);
502 void GenerateLog(ZoneList<Expression*>* args);
504 // Fast support for Math.random().
505 void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
507 // Fast support for StringAdd.
508 void GenerateStringAdd(ZoneList<Expression*>* args);
510 // Fast support for SubString.
511 void GenerateSubString(ZoneList<Expression*>* args);
513 // Fast support for StringCompare.
514 void GenerateStringCompare(ZoneList<Expression*>* args);
516 // Support for direct calls from JavaScript to native RegExp code.
517 void GenerateRegExpExec(ZoneList<Expression*>* args);
519 void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
521 // Support for fast native caches.
522 void GenerateGetFromCache(ZoneList<Expression*>* args);
524 // Fast support for number to string.
525 void GenerateNumberToString(ZoneList<Expression*>* args);
527 // Fast swapping of elements.
528 void GenerateSwapElements(ZoneList<Expression*>* args);
530 // Fast call for custom callbacks.
531 void GenerateCallFunction(ZoneList<Expression*>* args);
533 // Fast call to math functions.
534 void GenerateMathPow(ZoneList<Expression*>* args);
535 void GenerateMathSin(ZoneList<Expression*>* args);
536 void GenerateMathCos(ZoneList<Expression*>* args);
537 void GenerateMathSqrt(ZoneList<Expression*>* args);
539 // Simple condition analysis.
540 enum ConditionAnalysis {
545 ConditionAnalysis AnalyzeCondition(Expression* cond);
547 // Methods used to indicate which source code is generated for. Source
548 // positions are collected by the assembler and emitted with the relocation
550 void CodeForFunctionPosition(FunctionLiteral* fun);
551 void CodeForReturnPosition(FunctionLiteral* fun);
552 void CodeForStatementPosition(Statement* node);
553 void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
554 void CodeForSourcePosition(int pos);
557 // True if the registers are valid for entry to a block.
558 bool HasValidEntryRegisters();
561 List<DeferredCode*> deferred_;
564 MacroAssembler* masm_; // to generate code
566 CompilationInfo* info_;
568 // Code generation state
569 VirtualFrame* frame_;
570 RegisterAllocator* allocator_;
572 CodeGenState* state_;
575 Vector<TypeInfo>* type_info_;
578 BreakTarget function_return_;
580 // True if the function return is shadowed (ie, jumping to the target
581 // function_return_ does not jump to the true function return, but rather
582 // to some unlinking code).
583 bool function_return_is_shadowed_;
585 static InlineRuntimeLUT kInlineRuntimeLUT[];
587 friend class VirtualFrame;
588 friend class JumpTarget;
589 friend class Reference;
590 friend class FastCodeGenerator;
591 friend class FullCodeGenerator;
592 friend class FullCodeGenSyntaxChecker;
594 DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
598 // Compute a transcendental math function natively, or call the
599 // TranscendentalCache runtime function.
600 class TranscendentalCacheStub: public CodeStub {
602 explicit TranscendentalCacheStub(TranscendentalCache::Type type)
604 void Generate(MacroAssembler* masm);
606 TranscendentalCache::Type type_;
607 Major MajorKey() { return TranscendentalCache; }
608 int MinorKey() { return type_; }
609 Runtime::FunctionId RuntimeFunction();
613 class GenericBinaryOpStub : public CodeStub {
615 GenericBinaryOpStub(Token::Value op,
619 int constant_rhs = CodeGenerator::kUnknownIntValue)
624 constant_rhs_(constant_rhs),
625 specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
626 runtime_operands_type_(BinaryOpIC::DEFAULT),
629 GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
630 : op_(OpBits::decode(key)),
631 mode_(ModeBits::decode(key)),
632 lhs_(LhsRegister(RegisterBits::decode(key))),
633 rhs_(RhsRegister(RegisterBits::decode(key))),
634 constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
635 specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
636 runtime_operands_type_(type_info),
645 bool specialized_on_rhs_;
646 BinaryOpIC::TypeInfo runtime_operands_type_;
649 static const int kMaxKnownRhs = 0x40000000;
650 static const int kKnownRhsKeyBits = 6;
652 // Minor key encoding in 17 bits.
653 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
654 class OpBits: public BitField<Token::Value, 2, 6> {};
655 class TypeInfoBits: public BitField<int, 8, 2> {};
656 class RegisterBits: public BitField<bool, 10, 1> {};
657 class KnownIntBits: public BitField<int, 11, kKnownRhsKeyBits> {};
659 Major MajorKey() { return GenericBinaryOp; }
661 ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
662 (lhs_.is(r1) && rhs_.is(r0)));
663 // Encode the parameters in a unique 18 bit value.
664 return OpBits::encode(op_)
665 | ModeBits::encode(mode_)
666 | KnownIntBits::encode(MinorKeyForKnownInt())
667 | TypeInfoBits::encode(runtime_operands_type_)
668 | RegisterBits::encode(lhs_.is(r0));
671 void Generate(MacroAssembler* masm);
672 void HandleNonSmiBitwiseOp(MacroAssembler* masm,
675 void HandleBinaryOpSlowCases(MacroAssembler* masm,
679 const Builtins::JavaScript& builtin);
680 void GenerateTypeTransition(MacroAssembler* masm);
682 static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
683 if (constant_rhs == CodeGenerator::kUnknownIntValue) return false;
684 if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
685 if (op == Token::MOD) {
686 if (constant_rhs <= 1) return false;
687 if (constant_rhs <= 10) return true;
688 if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
694 int MinorKeyForKnownInt() {
695 if (!specialized_on_rhs_) return 0;
696 if (constant_rhs_ <= 10) return constant_rhs_ + 1;
697 ASSERT(IsPowerOf2(constant_rhs_));
699 int d = constant_rhs_;
700 while ((d & 1) == 0) {
704 ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits));
708 int KnownBitsForMinorKey(int key) {
710 if (key <= 11) return key - 1;
719 Register LhsRegister(bool lhs_is_r0) {
720 return lhs_is_r0 ? r0 : r1;
723 Register RhsRegister(bool lhs_is_r0) {
724 return lhs_is_r0 ? r1 : r0;
727 bool ShouldGenerateSmiCode() {
728 return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
729 runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
730 runtime_operands_type_ != BinaryOpIC::STRINGS;
733 bool ShouldGenerateFPCode() {
734 return runtime_operands_type_ != BinaryOpIC::STRINGS;
737 virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
739 virtual InlineCacheState GetICState() {
740 return BinaryOpIC::ToState(runtime_operands_type_);
743 const char* GetName();
747 if (!specialized_on_rhs_) {
748 PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
750 PrintF("GenericBinaryOpStub (%s by %d)\n",
759 class StringHelper : public AllStatic {
761 // Generate code for copying characters using a simple loop. This should only
762 // be used in places where the number of characters is small and the
763 // additional setup and checking in GenerateCopyCharactersLong adds too much
764 // overhead. Copying of overlapping regions is not supported.
765 // Dest register ends at the position after the last character written.
766 static void GenerateCopyCharacters(MacroAssembler* masm,
773 // Generate code for copying a large number of characters. This function
774 // is allowed to spend extra time setting up conditions to make copying
775 // faster. Copying of overlapping regions is not supported.
776 // Dest register ends at the position after the last character written.
777 static void GenerateCopyCharactersLong(MacroAssembler* masm,
789 // Probe the symbol table for a two character string. If the string is
790 // not found by probing a jump to the label not_found is performed. This jump
791 // does not guarantee that the string is not in the symbol table. If the
792 // string is found the code falls through with the string in register r0.
793 // Contents of both c1 and c2 registers are modified. At the exit c1 is
794 // guaranteed to contain halfword with low and high bytes equal to
795 // initial contents of c1 and c2 respectively.
796 static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
806 // Generate string hash.
807 static void GenerateHashInit(MacroAssembler* masm,
811 static void GenerateHashAddCharacter(MacroAssembler* masm,
815 static void GenerateHashGetHash(MacroAssembler* masm,
819 DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
823 // Flag that indicates how to generate code for the stub StringAddStub.
824 enum StringAddFlags {
825 NO_STRING_ADD_FLAGS = 0,
826 NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
830 class StringAddStub: public CodeStub {
832 explicit StringAddStub(StringAddFlags flags) {
833 string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
837 Major MajorKey() { return StringAdd; }
838 int MinorKey() { return string_check_ ? 0 : 1; }
840 void Generate(MacroAssembler* masm);
842 // Should the stub check whether arguments are strings?
847 class SubStringStub: public CodeStub {
852 Major MajorKey() { return SubString; }
853 int MinorKey() { return 0; }
855 void Generate(MacroAssembler* masm);
860 class StringCompareStub: public CodeStub {
862 StringCompareStub() { }
864 // Compare two flat ASCII strings and returns result in r0.
865 // Does not use the stack.
866 static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
875 Major MajorKey() { return StringCompare; }
876 int MinorKey() { return 0; }
878 void Generate(MacroAssembler* masm);
882 // This stub can convert a signed int32 to a heap number (double). It does
883 // not work for int32s that are in Smi range! No GC occurs during this stub
884 // so you don't have to set up the frame.
885 class WriteInt32ToHeapNumberStub : public CodeStub {
887 WriteInt32ToHeapNumberStub(Register the_int,
888 Register the_heap_number,
891 the_heap_number_(the_heap_number),
892 scratch_(scratch) { }
896 Register the_heap_number_;
899 // Minor key encoding in 16 bits.
900 class IntRegisterBits: public BitField<int, 0, 4> {};
901 class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
902 class ScratchRegisterBits: public BitField<int, 8, 4> {};
904 Major MajorKey() { return WriteInt32ToHeapNumber; }
906 // Encode the parameters in a unique 16 bit value.
907 return IntRegisterBits::encode(the_int_.code())
908 | HeapNumberRegisterBits::encode(the_heap_number_.code())
909 | ScratchRegisterBits::encode(scratch_.code());
912 void Generate(MacroAssembler* masm);
914 const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
917 void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
922 class NumberToStringStub: public CodeStub {
924 NumberToStringStub() { }
926 // Generate code to do a lookup in the number string cache. If the number in
927 // the register object is found in the cache the generated code falls through
928 // with the result in the result register. The object and the result register
929 // can be the same. If the number is not found in the cache the code jumps to
930 // the label not_found with only the content of register object unchanged.
931 static void GenerateLookupNumberStringCache(MacroAssembler* masm,
941 Major MajorKey() { return NumberToString; }
942 int MinorKey() { return 0; }
944 void Generate(MacroAssembler* masm);
946 const char* GetName() { return "NumberToStringStub"; }
950 PrintF("NumberToStringStub\n");
956 class RecordWriteStub : public CodeStub {
958 RecordWriteStub(Register object, Register offset, Register scratch)
959 : object_(object), offset_(offset), scratch_(scratch) { }
961 void Generate(MacroAssembler* masm);
970 PrintF("RecordWriteStub (object reg %d), (offset reg %d),"
971 " (scratch reg %d)\n",
972 object_.code(), offset_.code(), scratch_.code());
976 // Minor key encoding in 12 bits. 4 bits for each of the three
977 // registers (object, offset and scratch) OOOOAAAASSSS.
978 class ScratchBits: public BitField<uint32_t, 0, 4> {};
979 class OffsetBits: public BitField<uint32_t, 4, 4> {};
980 class ObjectBits: public BitField<uint32_t, 8, 4> {};
982 Major MajorKey() { return RecordWrite; }
985 // Encode the registers.
986 return ObjectBits::encode(object_.code()) |
987 OffsetBits::encode(offset_.code()) |
988 ScratchBits::encode(scratch_.code());
993 } } // namespace v8::internal
995 #endif // V8_ARM_CODEGEN_ARM_H_