1 // Copyright 2010 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #ifndef V8_ARM_CODEGEN_ARM_H_
29 #define V8_ARM_CODEGEN_ARM_H_
37 // Forward declarations
38 class CompilationInfo;
41 class RegisterAllocator;
// Distinguishes stores that initialize a constant binding (CONST_INIT) from
// ordinary assignments (NOT_CONST_INIT); consumed by Reference::SetValue and
// CodeGenerator::StoreToSlot.
44 enum InitState { CONST_INIT, NOT_CONST_INIT };
// Whether an expression is loaded as the operand of 'typeof'. Inside
// 'typeof', loads of unresolved global names must not throw reference
// errors (see LoadTypeofExpression below).
45 enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
48 // -------------------------------------------------------------------------
51 // A reference is a C++ stack-allocated object that puts a
52 // reference on the virtual frame. The reference may be consumed
53 // by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
54 // When the lifetime (scope) of a valid reference ends, it must have
55 // been consumed, and be in state UNLOADED.
// NOTE(review): this listing has elided lines (access specifiers, the size()
// signature, and the closing braces of set_type/the class are not shown) —
// verify against the original file before editing.
56 class Reference BASE_EMBEDDED {
58 // The values of the types are important, see size().
59 enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
// Creates a reference for 'expression'. When persist_after_get is true the
// reference stays on the frame after GetValue so a later SetValue can reuse
// it (used for compound assignments).
60 Reference(CodeGenerator* cgen,
61 Expression* expression,
62 bool persist_after_get = false);
65 Expression* expression() const { return expression_; }
66 Type type() const { return type_; }
// A reference's type may only be set once, while it is still ILLEGAL.
67 void set_type(Type value) {
68 ASSERT_EQ(ILLEGAL, type_);
// Guards on the state transition (presumably inside unload(); the method
// header is elided in this listing): the reference must currently be valid
// and not already unloaded.
73 ASSERT_NE(ILLEGAL, type_);
74 ASSERT_NE(UNLOADED, type_);
77 // The size the reference takes up on the stack.
// UNLOADED/ILLEGAL occupy no stack slots; SLOT/NAMED/KEYED occupy 0/1/2
// slots respectively — this is why the enum values matter.
79 return (type_ < SLOT) ? 0 : type_;
82 bool is_illegal() const { return type_ == ILLEGAL; }
83 bool is_slot() const { return type_ == SLOT; }
84 bool is_property() const { return type_ == NAMED || type_ == KEYED; }
85 bool is_unloaded() const { return type_ == UNLOADED; }
87 // Return the name. Only valid for named property references.
88 Handle<String> GetName();
90 // Generate code to push the value of the reference on top of the
91 // expression stack. The reference is expected to be already on top of
92 // the expression stack, and it is consumed by the call unless the
93 // reference is for a compound assignment.
94 // If the reference is not consumed, it is left in place under its value.
97 // Generate code to store the value on top of the expression stack in the
98 // reference. The reference is expected to be immediately below the value
99 // on the expression stack. The value is stored in the location specified
100 // by the reference, and is left on top of the stack, after the reference
101 // is popped from beneath it (unloaded).
102 void SetValue(InitState init_state);
105 CodeGenerator* cgen_;
106 Expression* expression_;
108 // Keep the reference on the stack after get, so it can be used by set later.
109 bool persist_after_get_;
113 // -------------------------------------------------------------------------
114 // Code generation state
116 // The state is passed down the AST by the code generator (and back up, in
117 // the form of the state of the label pair). It is threaded through the
118 // call stack. Constructing a state implicitly pushes it on the owning code
119 // generator's stack of states, and destroying one implicitly pops it.
// NOTE(review): access specifiers and the class's closing lines are elided
// in this listing — verify against the original file before editing.
121 class CodeGenState BASE_EMBEDDED {
123 // Create an initial code generator state. Destroying the initial state
124 // leaves the code generator with a NULL state.
125 explicit CodeGenState(CodeGenerator* owner);
127 // Create a code generator state based on a code generator's current
128 // state. The new state has its own pair of branch labels.
129 CodeGenState(CodeGenerator* owner,
130 JumpTarget* true_target,
131 JumpTarget* false_target);
133 // Destroy a code generator state and restore the owning code generator's
137 JumpTarget* true_target() const { return true_target_; }
138 JumpTarget* false_target() const { return false_target_; }
141 CodeGenerator* owner_;
142 JumpTarget* true_target_;
143 JumpTarget* false_target_;
// The state below this one on the owner's implicit stack of states (see the
// comment above the class: construction pushes, destruction pops).
144 CodeGenState* previous_;
148 // -------------------------------------------------------------------------
149 // Arguments allocation mode
// Returned by CodeGenerator::ArgumentsMode() and consumed by
// StoreArgumentsObject(); controls whether and when the arguments object
// is allocated for the compiled function.
151 enum ArgumentsAllocationMode {
152 NO_ARGUMENTS_ALLOCATION,
153 EAGER_ARGUMENTS_ALLOCATION,
154 LAZY_ARGUMENTS_ALLOCATION
158 // Different nop operations are used by the code generator to detect certain
159 // states of the generated code.
160 enum NopMarkerTypes {
// Marks a nop emitted after inlined property-access code so that it can be
// recognized (and patched) later.
// NOTE(review): preceding enumerator line(s) appear elided in this listing.
162 PROPERTY_ACCESS_INLINED
166 // -------------------------------------------------------------------------
// NOTE(review): this listing has gaps throughout — access specifiers,
// several parameter lists, enumerators, and closing braces are not shown.
// The comments below describe only what is visible; verify exact signatures
// against the original file before editing.
169 class CodeGenerator: public AstVisitor {
171 // Takes a function literal, generates code for it. This function should only
172 // be called by compiler.cc.
173 static Handle<Code> MakeCode(CompilationInfo* info);
175 // Printing of AST, etc. as requested by flags.
176 static void MakeCodePrologue(CompilationInfo* info);
178 // Allocate and install the code.
179 static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
181 CompilationInfo* info);
183 #ifdef ENABLE_LOGGING_AND_PROFILING
184 static bool ShouldGenerateLog(Expression* type);
187 static void SetFunctionInfo(Handle<JSFunction> fun,
188 FunctionLiteral* lit,
190 Handle<Script> script);
192 static void RecordPositions(MacroAssembler* masm, int pos);
// Accessors for the code generator's components and state.
195 MacroAssembler* masm() { return masm_; }
196 VirtualFrame* frame() const { return frame_; }
197 inline Handle<Script> script();
199 bool has_valid_frame() const { return frame_ != NULL; }
201 // Set the virtual frame to be new_frame, with non-frame register
202 // reference counts given by non_frame_registers. The non-frame
203 // register reference counts of the old frame are returned in
204 // non_frame_registers.
205 void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
209 RegisterAllocator* allocator() const { return allocator_; }
211 CodeGenState* state() { return state_; }
212 void set_state(CodeGenState* state) { state_ = state; }
// Queue a chunk of deferred code for emission after the main code.
214 void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
// Sentinel meaning "no statically known integer value" (see known_rhs
// parameters below and GenericBinaryOpStub).
216 static const int kUnknownIntValue = -1;
218 // If the name is an inline runtime function call return the number of
219 // expected arguments. Otherwise return -1.
220 static int InlineRuntimeCallArgumentsCount(Handle<String> name);
222 // Constants related to patching of inlined load/store.
223 static const int kInlinedKeyedLoadInstructionsAfterPatch = 19;
224 static const int kInlinedKeyedStoreInstructionsAfterPatch = 5;
227 // Construction/Destruction
228 explicit CodeGenerator(MacroAssembler* masm);
231 inline bool is_eval();
232 inline Scope* scope();
234 // Generating deferred code.
235 void ProcessDeferred();
// True when a result is pending in the condition-code register
// ('al' — always — means no condition is set).
238 bool has_cc() const { return cc_reg_ != al; }
// Branch targets of the current control-flow state (see CodeGenState).
239 JumpTarget* true_target() const { return state_->true_target(); }
240 JumpTarget* false_target() const { return state_->false_target(); }
242 // Track loop nesting level.
243 int loop_nesting() const { return loop_nesting_; }
244 void IncrementLoopNesting() { loop_nesting_++; }
245 void DecrementLoopNesting() { loop_nesting_--; }
248 void VisitStatements(ZoneList<Statement*>* statements);
// Declares one Visit method per concrete AST node type; the macro is
// expanded once for each entry of AST_NODE_LIST.
250 #define DEF_VISIT(type) \
251 void Visit##type(type* node);
252 AST_NODE_LIST(DEF_VISIT)
255 // Visit a statement and then spill the virtual frame if control flow can
256 // reach the end of the statement (ie, it does not exit via break,
257 // continue, return, or throw). This function is used temporarily while
258 // the code generator is being transformed.
259 inline void VisitAndSpill(Statement* statement);
261 // Visit a list of statements and then spill the virtual frame if control
262 // flow can reach the end of the list.
263 inline void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
265 // Main code generation function
266 void Generate(CompilationInfo* info);
268 // Returns the arguments allocation mode.
269 ArgumentsAllocationMode ArgumentsMode();
271 // Store the arguments object and allocate it if necessary.
272 void StoreArgumentsObject(bool initial);
274 // The following are used by class Reference.
275 void LoadReference(Reference* ref);
276 void UnloadReference(Reference* ref);
278 static MemOperand ContextOperand(Register context, int index) {
279 return MemOperand(context, Context::SlotOffset(index));
282 MemOperand SlotOperand(Slot* slot, Register tmp);
284 MemOperand ContextSlotOperandCheckExtensions(Slot* slot,
// The global object lives at a fixed slot of the context register cp.
290 static MemOperand GlobalObject() {
291 return ContextOperand(cp, Context::GLOBAL_INDEX);
// Load expression x for control flow: the result is delivered by jumping
// to true_target/false_target rather than as a value. (Remaining
// parameters are elided in this listing.)
294 void LoadCondition(Expression* x,
295 JumpTarget* true_target,
296 JumpTarget* false_target,
298 void Load(Expression* expr);
300 void LoadGlobalReceiver(Register scratch);
302 // Generate code to push the value of an expression on top of the frame
303 // and then spill the frame fully to memory. This function is used
304 // temporarily while the code generator is being transformed.
305 inline void LoadAndSpill(Expression* expression);
307 // Call LoadCondition and then spill the virtual frame unless control flow
308 // cannot reach the end of the expression (ie, by emitting only
309 // unconditional jumps to the control targets).
310 inline void LoadConditionAndSpill(Expression* expression,
311 JumpTarget* true_target,
312 JumpTarget* false_target,
315 // Read a value from a slot and leave it on top of the expression stack.
316 void LoadFromSlot(Slot* slot, TypeofState typeof_state);
317 void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
319 // Store the value on top of the stack to a slot.
320 void StoreToSlot(Slot* slot, InitState init_state);
322 // Support for compiling assignment expressions.
323 void EmitSlotAssignment(Assignment* node);
324 void EmitNamedPropertyAssignment(Assignment* node);
325 void EmitKeyedPropertyAssignment(Assignment* node);
327 // Load a named property, returning it in r0. The receiver is passed on the
328 // stack, and remains there.
329 void EmitNamedLoad(Handle<String> name, bool is_contextual);
331 // Store to a named property. If the store is contextual, value is passed on
332 // the frame and consumed. Otherwise, receiver and value are passed on the
333 // frame and consumed. The result is returned in r0.
334 void EmitNamedStore(Handle<String> name, bool is_contextual);
336 // Load a keyed property, leaving it in r0. The receiver and key are
337 // passed on the stack, and remain there.
338 void EmitKeyedLoad();
340 // Store a keyed property. Key and receiver are on the stack and the value is
341 // in r0. Result is returned in r0.
342 void EmitKeyedStore(StaticType* key_type);
344 void LoadFromGlobalSlotCheckExtensions(Slot* slot,
345 TypeofState typeof_state,
348 // Support for loading from local/global variables and arguments
349 // whose location is known unless they are shadowed by
350 // eval-introduced bindings. Generates no code for unsupported slot
351 // types and therefore expects to fall through to the slow jump target.
352 void EmitDynamicLoadFromSlotFastCase(Slot* slot,
353 TypeofState typeof_state,
357 // Special code for typeof expressions: Unfortunately, we must
358 // be careful when loading the expression in 'typeof'
359 // expressions. We are not allowed to throw reference errors for
360 // non-existing properties of the global object, so we must make it
361 // look like an explicit property access, instead of an access
362 // through the context chain.
363 void LoadTypeofExpression(Expression* x);
// Convert the value on top of the stack to a boolean and branch to the
// given targets.
365 void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
367 // Generate code that computes a shortcutting logical operation.
368 void GenerateLogicalBooleanOperation(BinaryOperation* node);
// known_rhs, when not kUnknownIntValue, is a statically known integer
// right operand that the generated code may specialize on.
370 void GenericBinaryOperation(Token::Value op,
371 OverwriteMode overwrite_mode,
372 int known_rhs = kUnknownIntValue);
373 void VirtualFrameBinaryOperation(Token::Value op,
374 OverwriteMode overwrite_mode,
375 int known_rhs = kUnknownIntValue);
376 void Comparison(Condition cc,
379 bool strict = false);
381 void SmiOperation(Token::Value op,
382 Handle<Object> value,
386 void CallWithArguments(ZoneList<Expression*>* arguments,
387 CallFunctionFlags flags,
390 // An optimized implementation of expressions of the form
391 // x.apply(y, arguments). We call x the applicand and y the receiver.
392 // The optimization avoids allocating an arguments object if possible.
393 void CallApplyLazy(Expression* applicand,
394 Expression* receiver,
395 VariableProxy* arguments,
399 void Branch(bool if_true, JumpTarget* target);
// Lookup-table entry mapping an inline runtime function to the member
// function that generates its code. (Further fields are elided in this
// listing.)
402 struct InlineRuntimeLUT {
403 void (CodeGenerator::*method)(ZoneList<Expression*>*);
408 static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
409 bool CheckForInlineRuntimeCall(CallRuntime* node);
410 static bool PatchInlineRuntimeEntry(Handle<String> name,
411 const InlineRuntimeLUT& new_entry,
412 InlineRuntimeLUT* old_entry);
414 static Handle<Code> ComputeLazyCompile(int argc);
415 void ProcessDeclarations(ZoneList<Declaration*>* declarations);
417 static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
419 // Declare global variables and functions in the given array of
421 void DeclareGlobals(Handle<FixedArray> pairs);
423 // Instantiate the function based on the shared function info.
424 void InstantiateFunction(Handle<SharedFunctionInfo> function_info);
426 // Support for type checks.
427 void GenerateIsSmi(ZoneList<Expression*>* args);
428 void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
429 void GenerateIsArray(ZoneList<Expression*>* args);
430 void GenerateIsRegExp(ZoneList<Expression*>* args);
431 void GenerateIsObject(ZoneList<Expression*>* args);
432 void GenerateIsFunction(ZoneList<Expression*>* args);
433 void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
435 // Support for construct call checks.
436 void GenerateIsConstructCall(ZoneList<Expression*>* args);
438 // Support for arguments.length and arguments[?].
439 void GenerateArgumentsLength(ZoneList<Expression*>* args);
440 void GenerateArguments(ZoneList<Expression*>* args);
442 // Support for accessing the class and value fields of an object.
443 void GenerateClassOf(ZoneList<Expression*>* args);
444 void GenerateValueOf(ZoneList<Expression*>* args);
445 void GenerateSetValueOf(ZoneList<Expression*>* args);
447 // Fast support for charCodeAt(n).
448 void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
450 // Fast support for string.charAt(n) and string[n].
451 void GenerateCharFromCode(ZoneList<Expression*>* args);
453 // Fast support for object equality testing.
454 void GenerateObjectEquals(ZoneList<Expression*>* args);
456 void GenerateLog(ZoneList<Expression*>* args);
458 // Fast support for Math.random().
459 void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
461 // Fast support for StringAdd.
462 void GenerateStringAdd(ZoneList<Expression*>* args);
464 // Fast support for SubString.
465 void GenerateSubString(ZoneList<Expression*>* args);
467 // Fast support for StringCompare.
468 void GenerateStringCompare(ZoneList<Expression*>* args);
470 // Support for direct calls from JavaScript to native RegExp code.
471 void GenerateRegExpExec(ZoneList<Expression*>* args);
473 void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
475 // Support for fast native caches.
476 void GenerateGetFromCache(ZoneList<Expression*>* args);
478 // Fast support for number to string.
479 void GenerateNumberToString(ZoneList<Expression*>* args);
481 // Fast swapping of elements.
482 void GenerateSwapElements(ZoneList<Expression*>* args);
484 // Fast call for custom callbacks.
485 void GenerateCallFunction(ZoneList<Expression*>* args);
487 // Fast call to math functions.
488 void GenerateMathPow(ZoneList<Expression*>* args);
489 void GenerateMathSin(ZoneList<Expression*>* args);
490 void GenerateMathCos(ZoneList<Expression*>* args);
491 void GenerateMathSqrt(ZoneList<Expression*>* args);
493 // Simple condition analysis.
494 enum ConditionAnalysis {
// (enumerators elided in this listing)
499 ConditionAnalysis AnalyzeCondition(Expression* cond);
501 // Methods used to indicate which source code is generated for. Source
502 // positions are collected by the assembler and emitted with the relocation
504 void CodeForFunctionPosition(FunctionLiteral* fun);
505 void CodeForReturnPosition(FunctionLiteral* fun);
506 void CodeForStatementPosition(Statement* node);
507 void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
508 void CodeForSourcePosition(int pos);
511 // True if the registers are valid for entry to a block.
512 bool HasValidEntryRegisters();
// Deferred code chunks queued via AddDeferred and emitted by ProcessDeferred.
515 List<DeferredCode*> deferred_;
518 MacroAssembler* masm_; // to generate code
520 CompilationInfo* info_;
522 // Code generation state
523 VirtualFrame* frame_;
524 RegisterAllocator* allocator_;
526 CodeGenState* state_;
// Target that 'return' statements in the compiled function jump to.
530 BreakTarget function_return_;
532 // True if the function return is shadowed (ie, jumping to the target
533 // function_return_ does not jump to the true function return, but rather
534 // to some unlinking code).
535 bool function_return_is_shadowed_;
// Table of inline runtime functions (see InlineRuntimeLUT above).
537 static InlineRuntimeLUT kInlineRuntimeLUT[];
539 friend class VirtualFrame;
540 friend class JumpTarget;
541 friend class Reference;
542 friend class FastCodeGenerator;
543 friend class FullCodeGenerator;
544 friend class FullCodeGenSyntaxChecker;
546 DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
// Code stub for generic binary operations on ARM. NOTE(review): this
// listing has gaps (several members, parameters, and closing braces are
// elided) — verify against the original file before editing.
550 class GenericBinaryOpStub : public CodeStub {
// Constructor for fresh stubs; constant_rhs is an optional statically
// known right operand (kUnknownIntValue means "not known").
552 GenericBinaryOpStub(Token::Value op,
556 int constant_rhs = CodeGenerator::kUnknownIntValue)
561 constant_rhs_(constant_rhs),
562 specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
563 runtime_operands_type_(BinaryOpIC::DEFAULT),
// Constructor that reconstitutes a stub from its minor key plus the
// operand type info recorded by the binary-op inline cache.
566 GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
567 : op_(OpBits::decode(key)),
568 mode_(ModeBits::decode(key)),
569 lhs_(LhsRegister(RegisterBits::decode(key))),
570 rhs_(RhsRegister(RegisterBits::decode(key))),
571 constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
572 specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
573 runtime_operands_type_(type_info),
582 bool specialized_on_rhs_;
583 BinaryOpIC::TypeInfo runtime_operands_type_;
586 static const int kMaxKnownRhs = 0x40000000;
587 static const int kKnownRhsKeyBits = 6;
589 // Minor key encoding in 17 bits.
590 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
591 class OpBits: public BitField<Token::Value, 2, 6> {};
592 class TypeInfoBits: public BitField<int, 8, 2> {};
593 class RegisterBits: public BitField<bool, 10, 1> {};
594 class KnownIntBits: public BitField<int, 11, kKnownRhsKeyBits> {};
596 Major MajorKey() { return GenericBinaryOp; }
// Only the register assignments (r0,r1) and (r1,r0) are supported, so a
// single bit suffices to encode the register choice below.
598 ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
599 (lhs_.is(r1) && rhs_.is(r0)));
600 // Encode the parameters in a unique 17 bit value (2+6+2+1+6 bits; see the
601 return OpBits::encode(op_)
602 | ModeBits::encode(mode_)
603 | KnownIntBits::encode(MinorKeyForKnownInt())
604 | TypeInfoBits::encode(runtime_operands_type_)
605 | RegisterBits::encode(lhs_.is(r0));
608 void Generate(MacroAssembler* masm);
609 void HandleNonSmiBitwiseOp(MacroAssembler* masm, Register lhs, Register rhs);
610 void HandleBinaryOpSlowCases(MacroAssembler* masm,
614 const Builtins::JavaScript& builtin);
615 void GenerateTypeTransition(MacroAssembler* masm);
// True when code specialized for a known constant right operand should be
// emitted: DIV by 2..3, MOD by 2..10 or by a power of two <= kMaxKnownRhs.
617 static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
618 if (constant_rhs == CodeGenerator::kUnknownIntValue) return false;
619 if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
620 if (op == Token::MOD) {
621 if (constant_rhs <= 1) return false;
622 if (constant_rhs <= 10) return true;
623 if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
// Map the known right operand to a small key: 0 = unspecialized,
// 1..11 = constants 0..10; larger powers of two are encoded via the
// shift-count loop below (some lines are elided in this listing).
629 int MinorKeyForKnownInt() {
630 if (!specialized_on_rhs_) return 0;
631 if (constant_rhs_ <= 10) return constant_rhs_ + 1;
632 ASSERT(IsPowerOf2(constant_rhs_));
634 int d = constant_rhs_;
635 while ((d & 1) == 0) {
639 ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits));
// Inverse of MinorKeyForKnownInt (the small-constant branch is visible;
// further cases are elided in this listing).
643 int KnownBitsForMinorKey(int key) {
645 if (key <= 11) return key - 1;
654 Register LhsRegister(bool lhs_is_r0) {
655 return lhs_is_r0 ? r0 : r1;
658 Register RhsRegister(bool lhs_is_r0) {
659 return lhs_is_r0 ? r1 : r0;
// Smi fast paths are skipped for unspecialized DIV/MOD and when the IC has
// already observed heap-number or string operands.
662 bool ShouldGenerateSmiCode() {
663 return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
664 runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
665 runtime_operands_type_ != BinaryOpIC::STRINGS;
668 bool ShouldGenerateFPCode() {
669 return runtime_operands_type_ != BinaryOpIC::STRINGS;
672 virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
674 virtual InlineCacheState GetICState() {
675 return BinaryOpIC::ToState(runtime_operands_type_);
678 const char* GetName();
// Debug printing: include the known right operand when specialized.
682 if (!specialized_on_rhs_) {
683 PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
685 PrintF("GenericBinaryOpStub (%s by %d)\n",
// Static helpers that emit string-manipulation code sequences for other
// stubs and the code generator. NOTE(review): many parameter lists and the
// class's closing lines are elided in this listing — verify signatures
// against the original file before editing.
694 class StringHelper : public AllStatic {
696 // Generates fast code for getting a char code out of a string
697 // object at the given index. May bail out for four reasons (in the
699 // * Receiver is not a string (receiver_not_string label).
700 // * Index is not a smi (index_not_smi label).
701 // * Index is out of range (index_out_of_range).
702 // * Some other reason (slow_case label). In this case it's
703 // guaranteed that the above conditions are not violated,
704 // e.g. it's safe to assume the receiver is a string and the
705 // index is a non-negative smi < length.
706 // When successful, object, index, and scratch are clobbered.
707 // Otherwise, scratch and result are clobbered.
708 static void GenerateFastCharCodeAt(MacroAssembler* masm,
713 Label* receiver_not_string,
714 Label* index_not_smi,
715 Label* index_out_of_range,
718 // Generates code for creating a one-char string from the given char
719 // code. May do a runtime call, so any register can be clobbered
720 // and, if the given invoke flag specifies a call, an internal frame
721 // is required. In tail call mode the result must be r0 register.
722 static void GenerateCharFromCode(MacroAssembler* masm,
728 // Generate code for copying characters using a simple loop. This should only
729 // be used in places where the number of characters is small and the
730 // additional setup and checking in GenerateCopyCharactersLong adds too much
731 // overhead. Copying of overlapping regions is not supported.
732 // Dest register ends at the position after the last character written.
733 static void GenerateCopyCharacters(MacroAssembler* masm,
740 // Generate code for copying a large number of characters. This function
741 // is allowed to spend extra time setting up conditions to make copying
742 // faster. Copying of overlapping regions is not supported.
743 // Dest register ends at the position after the last character written.
744 static void GenerateCopyCharactersLong(MacroAssembler* masm,
756 // Probe the symbol table for a two character string. If the string is
757 // not found by probing a jump to the label not_found is performed. This jump
758 // does not guarantee that the string is not in the symbol table. If the
759 // string is found the code falls through with the string in register r0.
760 // Contents of both c1 and c2 registers are modified. At the exit c1 is
761 // guaranteed to contain halfword with low and high bytes equal to
762 // initial contents of c1 and c2 respectively.
763 static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
773 // Generate string hash.
774 static void GenerateHashInit(MacroAssembler* masm,
778 static void GenerateHashAddCharacter(MacroAssembler* masm,
782 static void GenerateHashGetHash(MacroAssembler* masm,
786 DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
790 // Flag that indicates how to generate code for the stub StringAddStub.
// Bit flags consumed by the StringAddStub constructor below.
791 enum StringAddFlags {
792 NO_STRING_ADD_FLAGS = 0,
793 NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
// Stub implementing string addition (see CodeGenerator::GenerateStringAdd).
797 class StringAddStub: public CodeStub {
799 explicit StringAddStub(StringAddFlags flags) {
// string_check_ is true unless NO_STRING_CHECK_IN_STUB was passed.
800 string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
804 Major MajorKey() { return StringAdd; }
// The minor key distinguishes the checked (0) and unchecked (1) variants.
805 int MinorKey() { return string_check_ ? 0 : 1; }
807 void Generate(MacroAssembler* masm);
809 // Should the stub check whether arguments are strings?
// Stub implementing SubString; it has a single variant (minor key 0).
814 class SubStringStub: public CodeStub {
819 Major MajorKey() { return SubString; }
820 int MinorKey() { return 0; }
822 void Generate(MacroAssembler* masm);
// Stub implementing string comparison; single variant (minor key 0).
827 class StringCompareStub: public CodeStub {
829 StringCompareStub() { }
831 // Compare two flat ASCII strings and returns result in r0.
832 // Does not use the stack.
833 static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
842 Major MajorKey() { return StringCompare; }
843 int MinorKey() { return 0; }
845 void Generate(MacroAssembler* masm);
849 // This stub can convert a signed int32 to a heap number (double). It does
850 // not work for int32s that are in Smi range! No GC occurs during this stub
851 // so you don't have to set up the frame.
// See the comment above: converts a (non-Smi-range) signed int32 into a
// heap number without triggering GC. NOTE(review): the constructor's first
// initializer, member declarations, and closing lines are elided in this
// listing — verify against the original file before editing.
852 class WriteInt32ToHeapNumberStub : public CodeStub {
854 WriteInt32ToHeapNumberStub(Register the_int,
855 Register the_heap_number,
858 the_heap_number_(the_heap_number),
859 scratch_(scratch) { }
863 Register the_heap_number_;
866 // Minor key encoding in 16 bits.
// (Only the low 12 bits are defined by the three 4-bit fields below.)
867 class IntRegisterBits: public BitField<int, 0, 4> {};
868 class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
869 class ScratchRegisterBits: public BitField<int, 8, 4> {};
871 Major MajorKey() { return WriteInt32ToHeapNumber; }
873 // Encode the parameters in a unique 16 bit value.
874 return IntRegisterBits::encode(the_int_.code())
875 | HeapNumberRegisterBits::encode(the_heap_number_.code())
876 | ScratchRegisterBits::encode(scratch_.code());
879 void Generate(MacroAssembler* masm);
881 const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
884 void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
// Stub implementing number-to-string conversion via the number string
// cache; single variant (minor key 0).
889 class NumberToStringStub: public CodeStub {
891 NumberToStringStub() { }
893 // Generate code to do a lookup in the number string cache. If the number in
894 // the register object is found in the cache the generated code falls through
895 // with the result in the result register. The object and the result register
896 // can be the same. If the number is not found in the cache the code jumps to
897 // the label not_found with only the content of register object unchanged.
898 static void GenerateLookupNumberStringCache(MacroAssembler* masm,
908 Major MajorKey() { return NumberToString; }
909 int MinorKey() { return 0; }
911 void Generate(MacroAssembler* masm);
913 const char* GetName() { return "NumberToStringStub"; }
// Debug printing (surrounding lines of Print() are elided in this listing).
917 PrintF("NumberToStringStub\n");
// Stub that records a write into 'object' at 'offset' (write barrier).
// NOTE(review): member declarations and closing lines are elided in this
// listing — verify against the original file before editing.
923 class RecordWriteStub : public CodeStub {
925 RecordWriteStub(Register object, Register offset, Register scratch)
926 : object_(object), offset_(offset), scratch_(scratch) { }
928 void Generate(MacroAssembler* masm);
// Debug printing of the three register codes.
937 PrintF("RecordWriteStub (object reg %d), (offset reg %d),"
938 " (scratch reg %d)\n",
939 object_.code(), offset_.code(), scratch_.code());
943 // Minor key encoding in 12 bits. 4 bits for each of the three
944 // registers (object, offset and scratch) OOOOAAAASSSS.
945 class ScratchBits: public BitField<uint32_t, 0, 4> {};
946 class OffsetBits: public BitField<uint32_t, 4, 4> {};
947 class ObjectBits: public BitField<uint32_t, 8, 4> {};
949 Major MajorKey() { return RecordWrite; }
952 // Encode the registers.
953 return ObjectBits::encode(object_.code()) |
954 OffsetBits::encode(offset_.code()) |
955 ScratchBits::encode(scratch_.code());
960 } } // namespace v8::internal
962 #endif // V8_ARM_CODEGEN_ARM_H_