1 // Copyright 2010 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #ifndef V8_ARM_CODEGEN_ARM_H_
29 #define V8_ARM_CODEGEN_ARM_H_
37 // Forward declarations
38 class CompilationInfo;
41 class RegisterAllocator;
// Distinguishes a store that initializes a binding (CONST_INIT) from an
// ordinary assignment (NOT_CONST_INIT). NOTE(review): exact const-binding
// semantics are not visible in this listing — confirm against StoreToSlot.
44 enum InitState { CONST_INIT, NOT_CONST_INIT };
// Whether a load occurs inside a 'typeof' expression. See the comment on
// LoadTypeofExpression below: such loads must not throw reference errors
// for non-existing properties of the global object.
45 enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
// Whether the code generator should emit an inline smi fast path for a
// binary operation (consumed by GenericBinaryOperation).
46 enum GenerateInlineSmi { DONT_GENERATE_INLINE_SMI, GENERATE_INLINE_SMI };
49 // -------------------------------------------------------------------------
52 // A reference is a C++ stack-allocated object that puts a
53 // reference on the virtual frame. The reference may be consumed
54 // by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
55 // When the lifetime (scope) of a valid reference ends, it must have
56 // been consumed, and be in state UNLOADED.
// NOTE(review): this listing has elided lines (the inline numbering jumps),
// so access specifiers, some method bodies, and the closing brace of this
// class are not shown here.
57 class Reference BASE_EMBEDDED {
59 // The values of the types is important, see size().
// UNLOADED/ILLEGAL are negative so size() below maps them to 0 stack slots;
// SLOT/NAMED/KEYED double as the number of stack words the reference holds.
60 enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
// If persist_after_get is true, the reference stays on the stack after a
// GetValue so a later SetValue can reuse it (see persist_after_get_).
61 Reference(CodeGenerator* cgen,
62 Expression* expression,
63 bool persist_after_get = false);
66 Expression* expression() const { return expression_; }
67 Type type() const { return type_; }
// May only be called while the reference is still ILLEGAL (asserted);
// the assignment itself is in an elided line of this listing.
68 void set_type(Type value) {
69 ASSERT_EQ(ILLEGAL, type_);
// Preconditions of an elided accessor: the reference must be valid
// (neither ILLEGAL nor already UNLOADED).
74 ASSERT_NE(ILLEGAL, type_);
75 ASSERT_NE(UNLOADED, type_);
78 // The size the reference takes up on the stack.
// Relies on the enum encoding above: negative states occupy no stack words.
80 return (type_ < SLOT) ? 0 : type_;
83 bool is_illegal() const { return type_ == ILLEGAL; }
84 bool is_slot() const { return type_ == SLOT; }
85 bool is_property() const { return type_ == NAMED || type_ == KEYED; }
86 bool is_unloaded() const { return type_ == UNLOADED; }
88 // Return the name. Only valid for named property references.
89 Handle<String> GetName();
91 // Generate code to push the value of the reference on top of the
92 // expression stack. The reference is expected to be already on top of
93 // the expression stack, and it is consumed by the call unless the
94 // reference is for a compound assignment.
95 // If the reference is not consumed, it is left in place under its value.
98 // Generate code to store the value on top of the expression stack in the
99 // reference. The reference is expected to be immediately below the value
100 // on the expression stack. The value is stored in the location specified
101 // by the reference, and is left on top of the stack, after the reference
102 // is popped from beneath it (unloaded).
103 void SetValue(InitState init_state);
105 // This is in preparation for something that uses the reference on the stack.
106 // If we need this reference afterwards get then dup it now. Otherwise mark
108 inline void DupIfPersist();
111 CodeGenerator* cgen_;
112 Expression* expression_;
114 // Keep the reference on the stack after get, so it can be used by set later.
115 bool persist_after_get_;
119 // -------------------------------------------------------------------------
120 // Code generation state
122 // The state is passed down the AST by the code generator (and back up, in
123 // the form of the state of the label pair). It is threaded through the
124 // call stack. Constructing a state implicitly pushes it on the owning code
125 // generator's stack of states, and destroying one implicitly pops it.
// NOTE(review): listing is elided here — access specifiers and the closing
// brace of this class are not shown.
127 class CodeGenState BASE_EMBEDDED {
129 // Create an initial code generator state. Destroying the initial state
130 // leaves the code generator with a NULL state.
131 explicit CodeGenState(CodeGenerator* owner);
133 // Destroy a code generator state and restore the owning code generator's
// Virtual so derived states (ConditionCodeGenState, TypeInfoCodeGenState)
// can restore their own bookkeeping when popped.
135 virtual ~CodeGenState();
// Base state has no branch targets; subclasses override these to supply
// the true/false jump targets used while compiling conditions.
137 virtual JumpTarget* true_target() const { return NULL; }
138 virtual JumpTarget* false_target() const { return NULL; }
141 inline CodeGenerator* owner() { return owner_; }
// Link to the state that was current before this one was pushed.
142 inline CodeGenState* previous() const { return previous_; }
145 CodeGenerator* owner_;
146 CodeGenState* previous_;
// Code generation state for compiling a condition: carries the pair of
// branch targets to jump to when the condition is true or false.
// NOTE(review): elided listing — access specifiers and closing brace not shown.
150 class ConditionCodeGenState : public CodeGenState {
152 // Create a code generator state based on a code generator's current
153 // state. The new state has its own pair of branch labels.
154 ConditionCodeGenState(CodeGenerator* owner,
155 JumpTarget* true_target,
156 JumpTarget* false_target);
// Override the base class's NULL targets with this state's own pair.
158 virtual JumpTarget* true_target() const { return true_target_; }
159 virtual JumpTarget* false_target() const { return false_target_; }
162 JumpTarget* true_target_;
163 JumpTarget* false_target_;
// Code generation state that temporarily overrides type information
// (old_type_info_ presumably saves the previous value so the destructor can
// restore it — the constructor tail and destructor body are elided here).
167 class TypeInfoCodeGenState : public CodeGenState {
169 TypeInfoCodeGenState(CodeGenerator* owner,
172 ~TypeInfoCodeGenState();
// Branch targets are not overridden here; delegate to the enclosing state.
174 virtual JumpTarget* true_target() const { return previous()->true_target(); }
175 virtual JumpTarget* false_target() const {
176 return previous()->false_target();
181 TypeInfo old_type_info_;
185 // -------------------------------------------------------------------------
186 // Arguments allocation mode
// How (and whether) the arguments object is materialized for a function:
// not at all, eagerly at entry, or lazily on first use.
188 enum ArgumentsAllocationMode {
189 NO_ARGUMENTS_ALLOCATION,
190 EAGER_ARGUMENTS_ALLOCATION,
191 LAZY_ARGUMENTS_ALLOCATION
195 // Different nop operations are used by the code generator to detect certain
196 // states of the generated code.
// NOTE(review): earlier enumerators of this enum are elided in this listing.
197 enum NopMarkerTypes {
199 PROPERTY_ACCESS_INLINED
203 // -------------------------------------------------------------------------
// The classic (non-full-codegen) ARM code generator. Walks the AST as a
// visitor and emits machine code through a MacroAssembler, tracking the
// virtual frame, register allocation and condition-code state as it goes.
// NOTE(review): this listing has many elided lines — access specifiers,
// several parameter lists, method bodies and closing braces are not shown.
206 class CodeGenerator: public AstVisitor {
208 // Takes a function literal, generates code for it. This function should only
209 // be called by compiler.cc.
210 static Handle<Code> MakeCode(CompilationInfo* info);
212 // Printing of AST, etc. as requested by flags.
213 static void MakeCodePrologue(CompilationInfo* info);
215 // Allocate and install the code.
216 static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
218 CompilationInfo* info);
220 #ifdef ENABLE_LOGGING_AND_PROFILING
221 static bool ShouldGenerateLog(Expression* type);
224 static void SetFunctionInfo(Handle<JSFunction> fun,
225 FunctionLiteral* lit,
227 Handle<Script> script);
229 static bool RecordPositions(MacroAssembler* masm,
231 bool right_here = false);
// Accessors for the assembler and the current virtual frame.
234 MacroAssembler* masm() { return masm_; }
235 VirtualFrame* frame() const { return frame_; }
236 inline Handle<Script> script();
// A NULL frame indicates unreachable code (no valid frame to emit into).
238 bool has_valid_frame() const { return frame_ != NULL; }
240 // Set the virtual frame to be new_frame, with non-frame register
241 // reference counts given by non_frame_registers. The non-frame
242 // register reference counts of the old frame are returned in
243 // non_frame_registers.
244 void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
248 RegisterAllocator* allocator() const { return allocator_; }
// Current code generation state (top of the implicit CodeGenState stack).
250 CodeGenState* state() { return state_; }
251 void set_state(CodeGenState* state) { state_ = state; }
// Look up recorded type info for a slot; unknown for untracked slots.
253 TypeInfo type_info(Slot* slot) {
254 int index = NumberOfSlot(slot);
255 if (index == kInvalidSlotNumber) return TypeInfo::Unknown();
256 return (*type_info_)[index];
// Record new type info for a slot and return the previous value
// (Unknown if the slot is not tracked).
259 TypeInfo set_type_info(Slot* slot, TypeInfo info) {
260 int index = NumberOfSlot(slot);
261 ASSERT(index >= kInvalidSlotNumber);
262 if (index != kInvalidSlotNumber) {
263 TypeInfo previous_value = (*type_info_)[index];
264 (*type_info_)[index] = info;
265 return previous_value;
267 return TypeInfo::Unknown();
// Queue deferred code to be emitted after the main body (ProcessDeferred).
270 void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
// Sentinel meaning "right-hand side is not a known integer constant".
272 static const int kUnknownIntValue = -1;
274 // If the name is an inline runtime function call return the number of
275 // expected arguments. Otherwise return -1.
276 static int InlineRuntimeCallArgumentsCount(Handle<String> name);
278 // Constants related to patching of inlined load/store.
// Instruction counts the IC patcher relies on; must match the code
// emitted by EmitKeyedLoad/EmitKeyedStore.
279 static const int kInlinedKeyedLoadInstructionsAfterPatch = 17;
280 static const int kInlinedKeyedStoreInstructionsAfterPatch = 5;
283 // Construction/Destruction
284 explicit CodeGenerator(MacroAssembler* masm);
287 inline bool is_eval();
288 inline Scope* scope();
290 // Generating deferred code.
291 void ProcessDeferred();
293 static const int kInvalidSlotNumber = -1;
// Map a slot to its index in type_info_, or kInvalidSlotNumber.
295 int NumberOfSlot(Slot* slot);
// True if a condition code is set ('al' means "always", i.e. no condition).
298 bool has_cc() const { return cc_reg_ != al; }
// Branch targets supplied by the current (condition) state.
299 JumpTarget* true_target() const { return state_->true_target(); }
300 JumpTarget* false_target() const { return state_->false_target(); }
302 // Track loop nesting level.
303 int loop_nesting() const { return loop_nesting_; }
304 void IncrementLoopNesting() { loop_nesting_++; }
305 void DecrementLoopNesting() { loop_nesting_--; }
308 void VisitStatements(ZoneList<Statement*>* statements);
// Declare one Visit method per AST node type.
310 #define DEF_VISIT(type) \
311 void Visit##type(type* node);
312 AST_NODE_LIST(DEF_VISIT)
315 // Main code generation function
316 void Generate(CompilationInfo* info);
318 // Returns the arguments allocation mode.
319 ArgumentsAllocationMode ArgumentsMode();
321 // Store the arguments object and allocate it if necessary.
322 void StoreArgumentsObject(bool initial);
324 // The following are used by class Reference.
325 void LoadReference(Reference* ref);
326 void UnloadReference(Reference* ref);
// Memory operand addressing a slot of the given context.
328 static MemOperand ContextOperand(Register context, int index) {
329 return MemOperand(context, Context::SlotOffset(index));
332 MemOperand SlotOperand(Slot* slot, Register tmp);
334 MemOperand ContextSlotOperandCheckExtensions(Slot* slot,
// The global object, addressed through the current context register cp.
340 static MemOperand GlobalObject() {
341 return ContextOperand(cp, Context::GLOBAL_INDEX);
344 void LoadCondition(Expression* x,
345 JumpTarget* true_target,
346 JumpTarget* false_target,
348 void Load(Expression* expr);
350 void LoadGlobalReceiver(Register scratch);
352 // Read a value from a slot and leave it on top of the expression stack.
353 void LoadFromSlot(Slot* slot, TypeofState typeof_state);
354 void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
356 // Store the value on top of the stack to a slot.
357 void StoreToSlot(Slot* slot, InitState init_state);
359 // Support for compiling assignment expressions.
360 void EmitSlotAssignment(Assignment* node);
361 void EmitNamedPropertyAssignment(Assignment* node);
362 void EmitKeyedPropertyAssignment(Assignment* node);
364 // Load a named property, returning it in r0. The receiver is passed on the
365 // stack, and remains there.
366 void EmitNamedLoad(Handle<String> name, bool is_contextual);
368 // Store to a named property. If the store is contextual, value is passed on
369 // the frame and consumed. Otherwise, receiver and value are passed on the
370 // frame and consumed. The result is returned in r0.
371 void EmitNamedStore(Handle<String> name, bool is_contextual);
373 // Load a keyed property, leaving it in r0. The receiver and key are
374 // passed on the stack, and remain there.
375 void EmitKeyedLoad();
377 // Store a keyed property. Key and receiver are on the stack and the value is
378 // in r0. Result is returned in r0.
379 void EmitKeyedStore(StaticType* key_type);
381 void LoadFromGlobalSlotCheckExtensions(Slot* slot,
382 TypeofState typeof_state,
385 // Support for loading from local/global variables and arguments
386 // whose location is known unless they are shadowed by
387 // eval-introduced bindings. Generates no code for unsupported slot
388 // types and therefore expects to fall through to the slow jump target.
389 void EmitDynamicLoadFromSlotFastCase(Slot* slot,
390 TypeofState typeof_state,
394 // Special code for typeof expressions: Unfortunately, we must
395 // be careful when loading the expression in 'typeof'
396 // expressions. We are not allowed to throw reference errors for
397 // non-existing properties of the global object, so we must make it
398 // look like an explicit property access, instead of an access
399 // through the context chain.
400 void LoadTypeofExpression(Expression* x);
402 void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
404 // Generate code that computes a shortcutting logical operation.
405 void GenerateLogicalBooleanOperation(BinaryOperation* node);
407 void GenericBinaryOperation(Token::Value op,
408 OverwriteMode overwrite_mode,
409 GenerateInlineSmi inline_smi,
410 int known_rhs = kUnknownIntValue);
411 void Comparison(Condition cc,
414 bool strict = false);
416 void SmiOperation(Token::Value op,
417 Handle<Object> value,
421 void CallWithArguments(ZoneList<Expression*>* arguments,
422 CallFunctionFlags flags,
425 // An optimized implementation of expressions of the form
426 // x.apply(y, arguments). We call x the applicand and y the receiver.
427 // The optimization avoids allocating an arguments object if possible.
428 void CallApplyLazy(Expression* applicand,
429 Expression* receiver,
430 VariableProxy* arguments,
434 void Branch(bool if_true, JumpTarget* target);
// Lookup-table entry binding an inline runtime function to its generator
// member function (further fields elided in this listing).
437 struct InlineRuntimeLUT {
438 void (CodeGenerator::*method)(ZoneList<Expression*>*);
443 static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
444 bool CheckForInlineRuntimeCall(CallRuntime* node);
445 static bool PatchInlineRuntimeEntry(Handle<String> name,
446 const InlineRuntimeLUT& new_entry,
447 InlineRuntimeLUT* old_entry);
449 static Handle<Code> ComputeLazyCompile(int argc);
450 void ProcessDeclarations(ZoneList<Declaration*>* declarations);
452 static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
454 static Handle<Code> ComputeKeyedCallInitialize(int argc, InLoopFlag in_loop);
456 // Declare global variables and functions in the given array of
458 void DeclareGlobals(Handle<FixedArray> pairs);
460 // Instantiate the function based on the shared function info.
461 void InstantiateFunction(Handle<SharedFunctionInfo> function_info);
463 // Support for type checks.
464 void GenerateIsSmi(ZoneList<Expression*>* args);
465 void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
466 void GenerateIsArray(ZoneList<Expression*>* args);
467 void GenerateIsRegExp(ZoneList<Expression*>* args);
468 void GenerateIsObject(ZoneList<Expression*>* args);
469 void GenerateIsFunction(ZoneList<Expression*>* args);
470 void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
472 // Support for construct call checks.
473 void GenerateIsConstructCall(ZoneList<Expression*>* args);
475 // Support for arguments.length and arguments[?].
476 void GenerateArgumentsLength(ZoneList<Expression*>* args);
477 void GenerateArguments(ZoneList<Expression*>* args);
479 // Support for accessing the class and value fields of an object.
480 void GenerateClassOf(ZoneList<Expression*>* args);
481 void GenerateValueOf(ZoneList<Expression*>* args);
482 void GenerateSetValueOf(ZoneList<Expression*>* args);
484 // Fast support for charCodeAt(n).
485 void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
487 // Fast support for string.charAt(n) and string[n].
488 void GenerateStringCharFromCode(ZoneList<Expression*>* args);
490 // Fast support for string.charAt(n) and string[n].
491 void GenerateStringCharAt(ZoneList<Expression*>* args);
493 // Fast support for object equality testing.
494 void GenerateObjectEquals(ZoneList<Expression*>* args);
496 void GenerateLog(ZoneList<Expression*>* args);
498 // Fast support for Math.random().
499 void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
501 // Fast support for StringAdd.
502 void GenerateStringAdd(ZoneList<Expression*>* args);
504 // Fast support for SubString.
505 void GenerateSubString(ZoneList<Expression*>* args);
507 // Fast support for StringCompare.
508 void GenerateStringCompare(ZoneList<Expression*>* args);
510 // Support for direct calls from JavaScript to native RegExp code.
511 void GenerateRegExpExec(ZoneList<Expression*>* args);
513 void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
515 // Support for fast native caches.
516 void GenerateGetFromCache(ZoneList<Expression*>* args);
518 // Fast support for number to string.
519 void GenerateNumberToString(ZoneList<Expression*>* args);
521 // Fast swapping of elements.
522 void GenerateSwapElements(ZoneList<Expression*>* args);
524 // Fast call for custom callbacks.
525 void GenerateCallFunction(ZoneList<Expression*>* args);
527 // Fast call to math functions.
528 void GenerateMathPow(ZoneList<Expression*>* args);
529 void GenerateMathSin(ZoneList<Expression*>* args);
530 void GenerateMathCos(ZoneList<Expression*>* args);
531 void GenerateMathSqrt(ZoneList<Expression*>* args);
533 // Simple condition analysis.
// NOTE(review): the enumerators of ConditionAnalysis are elided here.
534 enum ConditionAnalysis {
539 ConditionAnalysis AnalyzeCondition(Expression* cond);
541 // Methods used to indicate which source code is generated for. Source
542 // positions are collected by the assembler and emitted with the relocation
544 void CodeForFunctionPosition(FunctionLiteral* fun);
545 void CodeForReturnPosition(FunctionLiteral* fun);
546 void CodeForStatementPosition(Statement* node);
547 void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
548 void CodeForSourcePosition(int pos);
551 // True if the registers are valid for entry to a block.
552 bool HasValidEntryRegisters();
// Deferred code queued by AddDeferred, emitted by ProcessDeferred.
555 List<DeferredCode*> deferred_;
558 MacroAssembler* masm_; // to generate code
560 CompilationInfo* info_;
562 // Code generation state
563 VirtualFrame* frame_;
564 RegisterAllocator* allocator_;
566 CodeGenState* state_;
// Per-slot type information indexed by NumberOfSlot.
569 Vector<TypeInfo>* type_info_;
572 BreakTarget function_return_;
574 // True if the function return is shadowed (ie, jumping to the target
575 // function_return_ does not jump to the true function return, but rather
576 // to some unlinking code).
577 bool function_return_is_shadowed_;
579 static InlineRuntimeLUT kInlineRuntimeLUT[];
581 friend class VirtualFrame;
582 friend class JumpTarget;
583 friend class Reference;
584 friend class FastCodeGenerator;
585 friend class FullCodeGenerator;
586 friend class FullCodeGenSyntaxChecker;
588 DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
592 // Compute a transcendental math function natively, or call the
593 // TranscendentalCache runtime function.
594 class TranscendentalCacheStub: public CodeStub {
// The cache entry type (e.g. which transcendental function) doubles as the
// stub's minor key; the member-initializer tail is elided in this listing.
596 explicit TranscendentalCacheStub(TranscendentalCache::Type type)
598 void Generate(MacroAssembler* masm);
600 TranscendentalCache::Type type_;
601 Major MajorKey() { return TranscendentalCache; }
// One compiled stub per cache type.
602 int MinorKey() { return type_; }
603 Runtime::FunctionId RuntimeFunction();
// Stub implementing generic binary operations (add, sub, bitwise, div/mod
// with known rhs, ...) with smi and FP fast paths and an IC-style type-info
// feedback mechanism. NOTE(review): elided listing — several initializer
// lists, members and closing braces are not shown.
607 class GenericBinaryOpStub : public CodeStub {
609 GenericBinaryOpStub(Token::Value op,
// kUnknownIntValue means the rhs is not a known integer constant.
613 int constant_rhs = CodeGenerator::kUnknownIntValue)
618 constant_rhs_(constant_rhs),
619 specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
620 runtime_operands_type_(BinaryOpIC::DEFAULT),
// Reconstruct a stub from its minor key plus IC type feedback.
623 GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
624 : op_(OpBits::decode(key)),
625 mode_(ModeBits::decode(key)),
626 lhs_(LhsRegister(RegisterBits::decode(key))),
627 rhs_(RhsRegister(RegisterBits::decode(key))),
628 constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
629 specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
630 runtime_operands_type_(type_info),
639 bool specialized_on_rhs_;
640 BinaryOpIC::TypeInfo runtime_operands_type_;
// Largest power-of-two rhs we specialize on (2^30, still a smi).
643 static const int kMaxKnownRhs = 0x40000000;
644 static const int kKnownRhsKeyBits = 6;
646 // Minor key encoding in 17 bits.
647 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
648 class OpBits: public BitField<Token::Value, 2, 6> {};
649 class TypeInfoBits: public BitField<int, 8, 2> {};
650 class RegisterBits: public BitField<bool, 10, 1> {};
651 class KnownIntBits: public BitField<int, 11, kKnownRhsKeyBits> {};
653 Major MajorKey() { return GenericBinaryOp; }
// Operands must live in r0/r1 in one of the two possible assignments;
// a single bit (lhs_.is(r0)) is enough to encode which.
655 ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
656 (lhs_.is(r1) && rhs_.is(r0)));
657 // Encode the parameters in a unique 18 bit value.
658 return OpBits::encode(op_)
659 | ModeBits::encode(mode_)
660 | KnownIntBits::encode(MinorKeyForKnownInt())
661 | TypeInfoBits::encode(runtime_operands_type_)
662 | RegisterBits::encode(lhs_.is(r0));
665 void Generate(MacroAssembler* masm);
666 void HandleNonSmiBitwiseOp(MacroAssembler* masm, Register lhs, Register rhs);
667 void HandleBinaryOpSlowCases(MacroAssembler* masm,
671 const Builtins::JavaScript& builtin);
672 void GenerateTypeTransition(MacroAssembler* masm);
// Whether a known constant rhs is worth a specialized stub: small divisors
// for DIV, small or power-of-two divisors for MOD.
674 static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
675 if (constant_rhs == CodeGenerator::kUnknownIntValue) return false;
676 if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
677 if (op == Token::MOD) {
678 if (constant_rhs <= 1) return false;
679 if (constant_rhs <= 10) return true;
680 if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
// Compress the specialized rhs into kKnownRhsKeyBits: 0 = unspecialized,
// 1..11 = rhs 0..10, larger keys encode the power-of-two exponent
// (the loop tail computing the key is elided in this listing).
686 int MinorKeyForKnownInt() {
687 if (!specialized_on_rhs_) return 0;
688 if (constant_rhs_ <= 10) return constant_rhs_ + 1;
689 ASSERT(IsPowerOf2(constant_rhs_));
691 int d = constant_rhs_;
692 while ((d & 1) == 0) {
696 ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits));
// Inverse of MinorKeyForKnownInt (power-of-two branch elided here).
700 int KnownBitsForMinorKey(int key) {
702 if (key <= 11) return key - 1;
// Decode the single register bit back into the r0/r1 operand pair.
711 Register LhsRegister(bool lhs_is_r0) {
712 return lhs_is_r0 ? r0 : r1;
715 Register RhsRegister(bool lhs_is_r0) {
716 return lhs_is_r0 ? r1 : r0;
// Smi fast path is skipped for unspecialized DIV/MOD and when type
// feedback says the operands are heap numbers or strings.
719 bool ShouldGenerateSmiCode() {
720 return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
721 runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
722 runtime_operands_type_ != BinaryOpIC::STRINGS;
725 bool ShouldGenerateFPCode() {
726 return runtime_operands_type_ != BinaryOpIC::STRINGS;
729 virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
731 virtual InlineCacheState GetICState() {
732 return BinaryOpIC::ToState(runtime_operands_type_);
735 const char* GetName();
739 if (!specialized_on_rhs_) {
740 PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
742 PrintF("GenericBinaryOpStub (%s by %d)\n",
// Static helpers for string code generation (copying, symbol-table probing,
// hashing). NOTE(review): parameter list tails are elided in this listing.
751 class StringHelper : public AllStatic {
753 // Generate code for copying characters using a simple loop. This should only
754 // be used in places where the number of characters is small and the
755 // additional setup and checking in GenerateCopyCharactersLong adds too much
756 // overhead. Copying of overlapping regions is not supported.
757 // Dest register ends at the position after the last character written.
758 static void GenerateCopyCharacters(MacroAssembler* masm,
765 // Generate code for copying a large number of characters. This function
766 // is allowed to spend extra time setting up conditions to make copying
767 // faster. Copying of overlapping regions is not supported.
768 // Dest register ends at the position after the last character written.
769 static void GenerateCopyCharactersLong(MacroAssembler* masm,
781 // Probe the symbol table for a two character string. If the string is
782 // not found by probing a jump to the label not_found is performed. This jump
783 // does not guarantee that the string is not in the symbol table. If the
784 // string is found the code falls through with the string in register r0.
785 // Contents of both c1 and c2 registers are modified. At the exit c1 is
786 // guaranteed to contain halfword with low and high bytes equal to
787 // initial contents of c1 and c2 respectively.
788 static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
798 // Generate string hash.
799 static void GenerateHashInit(MacroAssembler* masm,
803 static void GenerateHashAddCharacter(MacroAssembler* masm,
807 static void GenerateHashGetHash(MacroAssembler* masm,
// All members are static; no instances of this helper are ever created.
811 DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
815 // Flag that indicates how to generate code for the stub StringAddStub.
816 enum StringAddFlags {
817 NO_STRING_ADD_FLAGS = 0,
818 NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
// Stub implementing string concatenation; may skip the argument string
// checks when the caller guarantees both operands are strings.
822 class StringAddStub: public CodeStub {
824 explicit StringAddStub(StringAddFlags flags) {
825 string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
829 Major MajorKey() { return StringAdd; }
// Checked and unchecked variants compile to distinct stubs.
830 int MinorKey() { return string_check_ ? 0 : 1; }
832 void Generate(MacroAssembler* masm);
834 // Should the stub check whether arguments are strings?
// Stub implementing the SubString operation (single variant, minor key 0).
839 class SubStringStub: public CodeStub {
844 Major MajorKey() { return SubString; }
845 int MinorKey() { return 0; }
847 void Generate(MacroAssembler* masm);
// Stub implementing string comparison (single variant, minor key 0).
852 class StringCompareStub: public CodeStub {
854 StringCompareStub() { }
856 // Compare two flat ASCII strings and returns result in r0.
857 // Does not use the stack.
858 static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
867 Major MajorKey() { return StringCompare; }
868 int MinorKey() { return 0; }
870 void Generate(MacroAssembler* masm);
874 // This stub can convert a signed int32 to a heap number (double). It does
875 // not work for int32s that are in Smi range! No GC occurs during this stub
876 // so you don't have to set up the frame.
877 class WriteInt32ToHeapNumberStub : public CodeStub {
// the_int: source value; the_heap_number: pre-allocated HeapNumber to fill;
// scratch: clobbered temporary. (Initializer for the_int_ is elided here.)
879 WriteInt32ToHeapNumberStub(Register the_int,
880 Register the_heap_number,
883 the_heap_number_(the_heap_number),
884 scratch_(scratch) { }
888 Register the_heap_number_;
891 // Minor key encoding in 16 bits.
// 4 bits per register code; one field for each constructor register.
892 class IntRegisterBits: public BitField<int, 0, 4> {};
893 class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
894 class ScratchRegisterBits: public BitField<int, 8, 4> {};
896 Major MajorKey() { return WriteInt32ToHeapNumber; }
898 // Encode the parameters in a unique 16 bit value.
899 return IntRegisterBits::encode(the_int_.code())
900 | HeapNumberRegisterBits::encode(the_heap_number_.code())
901 | ScratchRegisterBits::encode(scratch_.code());
904 void Generate(MacroAssembler* masm);
906 const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
909 void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
// Stub implementing number-to-string conversion via the number string cache.
914 class NumberToStringStub: public CodeStub {
916 NumberToStringStub() { }
918 // Generate code to do a lookup in the number string cache. If the number in
919 // the register object is found in the cache the generated code falls through
920 // with the result in the result register. The object and the result register
921 // can be the same. If the number is not found in the cache the code jumps to
922 // the label not_found with only the content of register object unchanged.
923 static void GenerateLookupNumberStringCache(MacroAssembler* masm,
933 Major MajorKey() { return NumberToString; }
934 int MinorKey() { return 0; }
936 void Generate(MacroAssembler* masm);
938 const char* GetName() { return "NumberToStringStub"; }
942 PrintF("NumberToStringStub\n");
// Stub recording a write into an object for the write barrier; parameterized
// by the object, offset and scratch registers (encoded in the minor key).
948 class RecordWriteStub : public CodeStub {
950 RecordWriteStub(Register object, Register offset, Register scratch)
951 : object_(object), offset_(offset), scratch_(scratch) { }
953 void Generate(MacroAssembler* masm);
962 PrintF("RecordWriteStub (object reg %d), (offset reg %d),"
963 " (scratch reg %d)\n",
964 object_.code(), offset_.code(), scratch_.code());
968 // Minor key encoding in 12 bits. 4 bits for each of the three
969 // registers (object, offset and scratch) OOOOAAAASSSS.
970 class ScratchBits: public BitField<uint32_t, 0, 4> {};
971 class OffsetBits: public BitField<uint32_t, 4, 4> {};
972 class ObjectBits: public BitField<uint32_t, 8, 4> {};
974 Major MajorKey() { return RecordWrite; }
977 // Encode the registers.
978 return ObjectBits::encode(object_.code()) |
979 OffsetBits::encode(offset_.code()) |
980 ScratchBits::encode(scratch_.code());
985 } } // namespace v8::internal
987 #endif // V8_ARM_CODEGEN_ARM_H_