1 // Copyright 2010 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
// met:
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #ifndef V8_ARM_CODEGEN_ARM_H_
29 #define V8_ARM_CODEGEN_ARM_H_
// Forward declarations
class CompilationInfo;
class DeferredCode;
class JumpTarget;
class RegisterAllocator;
class RegisterFile;

// Is a variable initialization a const initialization?
enum InitState { CONST_INIT, NOT_CONST_INIT };
// Is the expression being loaded the operand of a typeof?
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
// Should an inlined smi fast path be generated for a binary operation?
enum GenerateInlineSmi { DONT_GENERATE_INLINE_SMI, GENERATE_INLINE_SMI };
// Classification of the value being stored, used when deciding whether a
// write barrier is needed (see Reference::SetValue / EmitKeyedStore).
enum WriteBarrierCharacter { UNLIKELY_SMI, LIKELY_SMI, NEVER_NEWSPACE };
50 // -------------------------------------------------------------------------
53 // A reference is a C++ stack-allocated object that puts a
54 // reference on the virtual frame. The reference may be consumed
55 // by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
56 // When the lifetime (scope) of a valid reference ends, it must have
57 // been consumed, and be in state UNLOADED.
58 class Reference BASE_EMBEDDED {
60 // The values of the types is important, see size().
61 enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
62 Reference(CodeGenerator* cgen,
63 Expression* expression,
64 bool persist_after_get = false);
67 Expression* expression() const { return expression_; }
68 Type type() const { return type_; }
69 void set_type(Type value) {
70 ASSERT_EQ(ILLEGAL, type_);
75 ASSERT_NE(ILLEGAL, type_);
76 ASSERT_NE(UNLOADED, type_);
79 // The size the reference takes up on the stack.
81 return (type_ < SLOT) ? 0 : type_;
84 bool is_illegal() const { return type_ == ILLEGAL; }
85 bool is_slot() const { return type_ == SLOT; }
86 bool is_property() const { return type_ == NAMED || type_ == KEYED; }
87 bool is_unloaded() const { return type_ == UNLOADED; }
89 // Return the name. Only valid for named property references.
90 Handle<String> GetName();
92 // Generate code to push the value of the reference on top of the
93 // expression stack. The reference is expected to be already on top of
94 // the expression stack, and it is consumed by the call unless the
95 // reference is for a compound assignment.
96 // If the reference is not consumed, it is left in place under its value.
99 // Generate code to store the value on top of the expression stack in the
100 // reference. The reference is expected to be immediately below the value
101 // on the expression stack. The value is stored in the location specified
102 // by the reference, and is left on top of the stack, after the reference
103 // is popped from beneath it (unloaded).
104 void SetValue(InitState init_state, WriteBarrierCharacter wb);
106 // This is in preparation for something that uses the reference on the stack.
107 // If we need this reference afterwards get then dup it now. Otherwise mark
109 inline void DupIfPersist();
112 CodeGenerator* cgen_;
113 Expression* expression_;
115 // Keep the reference on the stack after get, so it can be used by set later.
116 bool persist_after_get_;
120 // -------------------------------------------------------------------------
121 // Code generation state
123 // The state is passed down the AST by the code generator (and back up, in
124 // the form of the state of the label pair). It is threaded through the
125 // call stack. Constructing a state implicitly pushes it on the owning code
126 // generator's stack of states, and destroying one implicitly pops it.
128 class CodeGenState BASE_EMBEDDED {
130 // Create an initial code generator state. Destroying the initial state
131 // leaves the code generator with a NULL state.
132 explicit CodeGenState(CodeGenerator* owner);
134 // Destroy a code generator state and restore the owning code generator's
136 virtual ~CodeGenState();
138 virtual JumpTarget* true_target() const { return NULL; }
139 virtual JumpTarget* false_target() const { return NULL; }
142 inline CodeGenerator* owner() { return owner_; }
143 inline CodeGenState* previous() const { return previous_; }
146 CodeGenerator* owner_;
147 CodeGenState* previous_;
151 class ConditionCodeGenState : public CodeGenState {
153 // Create a code generator state based on a code generator's current
154 // state. The new state has its own pair of branch labels.
155 ConditionCodeGenState(CodeGenerator* owner,
156 JumpTarget* true_target,
157 JumpTarget* false_target);
159 virtual JumpTarget* true_target() const { return true_target_; }
160 virtual JumpTarget* false_target() const { return false_target_; }
163 JumpTarget* true_target_;
164 JumpTarget* false_target_;
168 class TypeInfoCodeGenState : public CodeGenState {
170 TypeInfoCodeGenState(CodeGenerator* owner,
173 ~TypeInfoCodeGenState();
175 virtual JumpTarget* true_target() const { return previous()->true_target(); }
176 virtual JumpTarget* false_target() const {
177 return previous()->false_target();
182 TypeInfo old_type_info_;
// -------------------------------------------------------------------------
// Arguments allocation mode

enum ArgumentsAllocationMode {
  NO_ARGUMENTS_ALLOCATION,
  EAGER_ARGUMENTS_ALLOCATION,
  LAZY_ARGUMENTS_ALLOCATION
};


// Different nop operations are used by the code generator to detect certain
// states of the generated code.
enum NopMarkerTypes {
  NON_MARKING_NOP = 0,  // NOTE(review): reconstructed enumerator — confirm.
  PROPERTY_ACCESS_INLINED
};
204 // -------------------------------------------------------------------------
// NOTE(review): this header is an extracted listing — every line carries its
// original source line number as a prefix, and many lines (access
// specifiers, closing braces, blank lines, and some declarations) were
// dropped by the extraction. The CodeGenerator class is too gutted to
// reconstruct safely here; code is kept byte-identical and only comments
// are added. Restoring the missing lines requires the original file.
//
// CodeGenerator walks the AST (AstVisitor) and emits ARM machine code via
// the MacroAssembler, tracking an abstract VirtualFrame of the expression
// stack as it goes.
207 class CodeGenerator: public AstVisitor {
209 // Takes a function literal, generates code for it. This function should only
210 // be called by compiler.cc.
211 static Handle<Code> MakeCode(CompilationInfo* info);
213 // Printing of AST, etc. as requested by flags.
214 static void MakeCodePrologue(CompilationInfo* info);
216 // Allocate and install the code.
217 static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
219 CompilationInfo* info);
221 #ifdef ENABLE_LOGGING_AND_PROFILING
222 static bool ShouldGenerateLog(Expression* type);
225 static void SetFunctionInfo(Handle<JSFunction> fun,
226 FunctionLiteral* lit,
228 Handle<Script> script);
230 static bool RecordPositions(MacroAssembler* masm,
232 bool right_here = false);
// Accessors for the assembler and the virtual frame being built.
235 MacroAssembler* masm() { return masm_; }
236 VirtualFrame* frame() const { return frame_; }
237 inline Handle<Script> script();
239 bool has_valid_frame() const { return frame_ != NULL; }
241 // Set the virtual frame to be new_frame, with non-frame register
242 // reference counts given by non_frame_registers. The non-frame
243 // register reference counts of the old frame are returned in
244 // non_frame_registers.
245 void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
249 RegisterAllocator* allocator() const { return allocator_; }
251 CodeGenState* state() { return state_; }
252 void set_state(CodeGenState* state) { state_ = state; }
// Read the recorded type info for a slot; Unknown if the slot is untracked.
254 TypeInfo type_info(Slot* slot) {
255 int index = NumberOfSlot(slot);
256 if (index == kInvalidSlotNumber) return TypeInfo::Unknown();
257 return (*type_info_)[index];
// Record new type info for a slot, returning the previous value.
260 TypeInfo set_type_info(Slot* slot, TypeInfo info) {
261 int index = NumberOfSlot(slot);
262 ASSERT(index >= kInvalidSlotNumber);
263 if (index != kInvalidSlotNumber) {
264 TypeInfo previous_value = (*type_info_)[index];
265 (*type_info_)[index] = info;
266 return previous_value;
268 return TypeInfo::Unknown();
271 void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
273 static const int kUnknownIntValue = -1;
275 // If the name is an inline runtime function call return the number of
276 // expected arguments. Otherwise return -1.
277 static int InlineRuntimeCallArgumentsCount(Handle<String> name);
279 // Constants related to patching of inlined load/store.
280 static int GetInlinedKeyedLoadInstructionsAfterPatch() {
281 return FLAG_debug_code ? 27 : 13;
283 static const int kInlinedKeyedStoreInstructionsAfterPatch = 5;
284 static int GetInlinedNamedStoreInstructionsAfterPatch() {
285 ASSERT(inlined_write_barrier_size_ != -1);
286 return inlined_write_barrier_size_ + 4;
290 // Construction/Destruction
291 explicit CodeGenerator(MacroAssembler* masm);
294 inline bool is_eval();
295 inline Scope* scope();
297 // Generating deferred code.
298 void ProcessDeferred();
300 static const int kInvalidSlotNumber = -1;
302 int NumberOfSlot(Slot* slot);
// NOTE(review): the cc_reg_ member read here is declared on a line not
// visible in this dump.
305 bool has_cc() const { return cc_reg_ != al; }
306 JumpTarget* true_target() const { return state_->true_target(); }
307 JumpTarget* false_target() const { return state_->false_target(); }
309 // Track loop nesting level.
310 int loop_nesting() const { return loop_nesting_; }
311 void IncrementLoopNesting() { loop_nesting_++; }
312 void DecrementLoopNesting() { loop_nesting_--; }
315 void VisitStatements(ZoneList<Statement*>* statements);
// Declares one Visit method per AST node type (see AST_NODE_LIST).
317 #define DEF_VISIT(type) \
318 void Visit##type(type* node);
319 AST_NODE_LIST(DEF_VISIT)
322 // Main code generation function
323 void Generate(CompilationInfo* info);
325 // Generate the return sequence code. Should be called no more than
326 // once per compiled function, immediately after binding the return
327 // target (which can not be done more than once). The return value should
329 void GenerateReturnSequence();
331 // Returns the arguments allocation mode.
332 ArgumentsAllocationMode ArgumentsMode();
334 // Store the arguments object and allocate it if necessary.
335 void StoreArgumentsObject(bool initial);
337 // The following are used by class Reference.
338 void LoadReference(Reference* ref);
339 void UnloadReference(Reference* ref);
341 static MemOperand ContextOperand(Register context, int index) {
342 return MemOperand(context, Context::SlotOffset(index));
345 MemOperand SlotOperand(Slot* slot, Register tmp);
347 MemOperand ContextSlotOperandCheckExtensions(Slot* slot,
353 static MemOperand GlobalObject() {
354 return ContextOperand(cp, Context::GLOBAL_INDEX);
357 void LoadCondition(Expression* x,
358 JumpTarget* true_target,
359 JumpTarget* false_target,
361 void Load(Expression* expr);
363 void LoadGlobalReceiver(Register scratch);
365 // Read a value from a slot and leave it on top of the expression stack.
366 void LoadFromSlot(Slot* slot, TypeofState typeof_state);
367 void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
369 // Store the value on top of the stack to a slot.
370 void StoreToSlot(Slot* slot, InitState init_state);
372 // Support for compiling assignment expressions.
373 void EmitSlotAssignment(Assignment* node);
374 void EmitNamedPropertyAssignment(Assignment* node);
375 void EmitKeyedPropertyAssignment(Assignment* node);
377 // Load a named property, returning it in r0. The receiver is passed on the
378 // stack, and remains there.
379 void EmitNamedLoad(Handle<String> name, bool is_contextual);
381 // Store to a named property. If the store is contextual, value is passed on
382 // the frame and consumed. Otherwise, receiver and value are passed on the
383 // frame and consumed. The result is returned in r0.
384 void EmitNamedStore(Handle<String> name, bool is_contextual);
386 // Load a keyed property, leaving it in r0. The receiver and key are
387 // passed on the stack, and remain there.
388 void EmitKeyedLoad();
390 // Store a keyed property. Key and receiver are on the stack and the value is
391 // in r0. Result is returned in r0.
392 void EmitKeyedStore(StaticType* key_type, WriteBarrierCharacter wb_info);
394 void LoadFromGlobalSlotCheckExtensions(Slot* slot,
395 TypeofState typeof_state,
398 // Support for loading from local/global variables and arguments
399 // whose location is known unless they are shadowed by
400 // eval-introduced bindings. Generates no code for unsupported slot
401 // types and therefore expects to fall through to the slow jump target.
402 void EmitDynamicLoadFromSlotFastCase(Slot* slot,
403 TypeofState typeof_state,
407 // Special code for typeof expressions: Unfortunately, we must
408 // be careful when loading the expression in 'typeof'
409 // expressions. We are not allowed to throw reference errors for
410 // non-existing properties of the global object, so we must make it
411 // look like an explicit property access, instead of an access
412 // through the context chain.
413 void LoadTypeofExpression(Expression* x);
415 void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
417 // Generate code that computes a shortcutting logical operation.
418 void GenerateLogicalBooleanOperation(BinaryOperation* node);
420 void GenericBinaryOperation(Token::Value op,
421 OverwriteMode overwrite_mode,
422 GenerateInlineSmi inline_smi,
423 int known_rhs = kUnknownIntValue);
424 void Comparison(Condition cc,
427 bool strict = false);
429 void SmiOperation(Token::Value op,
430 Handle<Object> value,
434 void CallWithArguments(ZoneList<Expression*>* arguments,
435 CallFunctionFlags flags,
438 // An optimized implementation of expressions of the form
439 // x.apply(y, arguments). We call x the applicand and y the receiver.
440 // The optimization avoids allocating an arguments object if possible.
441 void CallApplyLazy(Expression* applicand,
442 Expression* receiver,
443 VariableProxy* arguments,
447 void Branch(bool if_true, JumpTarget* target);
// Lookup table entry mapping an inline-runtime-call name to its generator.
450 struct InlineRuntimeLUT {
451 void (CodeGenerator::*method)(ZoneList<Expression*>*);
456 static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
457 bool CheckForInlineRuntimeCall(CallRuntime* node);
458 static bool PatchInlineRuntimeEntry(Handle<String> name,
459 const InlineRuntimeLUT& new_entry,
460 InlineRuntimeLUT* old_entry);
462 static Handle<Code> ComputeLazyCompile(int argc);
463 void ProcessDeclarations(ZoneList<Declaration*>* declarations);
465 static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
467 static Handle<Code> ComputeKeyedCallInitialize(int argc, InLoopFlag in_loop);
469 // Declare global variables and functions in the given array of
471 void DeclareGlobals(Handle<FixedArray> pairs);
473 // Instantiate the function based on the shared function info.
474 void InstantiateFunction(Handle<SharedFunctionInfo> function_info);
476 // Support for type checks.
477 void GenerateIsSmi(ZoneList<Expression*>* args);
478 void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
479 void GenerateIsArray(ZoneList<Expression*>* args);
480 void GenerateIsRegExp(ZoneList<Expression*>* args);
481 void GenerateIsObject(ZoneList<Expression*>* args);
482 void GenerateIsSpecObject(ZoneList<Expression*>* args);
483 void GenerateIsFunction(ZoneList<Expression*>* args);
484 void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
486 // Support for construct call checks.
487 void GenerateIsConstructCall(ZoneList<Expression*>* args);
489 // Support for arguments.length and arguments[?].
490 void GenerateArgumentsLength(ZoneList<Expression*>* args);
491 void GenerateArguments(ZoneList<Expression*>* args);
493 // Support for accessing the class and value fields of an object.
494 void GenerateClassOf(ZoneList<Expression*>* args);
495 void GenerateValueOf(ZoneList<Expression*>* args);
496 void GenerateSetValueOf(ZoneList<Expression*>* args);
498 // Fast support for charCodeAt(n).
499 void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
501 // Fast support for string.charAt(n) and string[n].
502 void GenerateStringCharFromCode(ZoneList<Expression*>* args);
504 // Fast support for string.charAt(n) and string[n].
505 void GenerateStringCharAt(ZoneList<Expression*>* args);
507 // Fast support for object equality testing.
508 void GenerateObjectEquals(ZoneList<Expression*>* args);
510 void GenerateLog(ZoneList<Expression*>* args);
512 // Fast support for Math.random().
513 void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
515 // Fast support for StringAdd.
516 void GenerateStringAdd(ZoneList<Expression*>* args);
518 // Fast support for SubString.
519 void GenerateSubString(ZoneList<Expression*>* args);
521 // Fast support for StringCompare.
522 void GenerateStringCompare(ZoneList<Expression*>* args);
524 // Support for direct calls from JavaScript to native RegExp code.
525 void GenerateRegExpExec(ZoneList<Expression*>* args);
527 void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
529 // Support for fast native caches.
530 void GenerateGetFromCache(ZoneList<Expression*>* args);
532 // Fast support for number to string.
533 void GenerateNumberToString(ZoneList<Expression*>* args);
535 // Fast swapping of elements.
536 void GenerateSwapElements(ZoneList<Expression*>* args);
538 // Fast call for custom callbacks.
539 void GenerateCallFunction(ZoneList<Expression*>* args);
541 // Fast call to math functions.
542 void GenerateMathPow(ZoneList<Expression*>* args);
543 void GenerateMathSin(ZoneList<Expression*>* args);
544 void GenerateMathCos(ZoneList<Expression*>* args);
545 void GenerateMathSqrt(ZoneList<Expression*>* args);
547 // Simple condition analysis.
548 enum ConditionAnalysis {
553 ConditionAnalysis AnalyzeCondition(Expression* cond);
555 // Methods used to indicate which source code is generated for. Source
556 // positions are collected by the assembler and emitted with the relocation
558 void CodeForFunctionPosition(FunctionLiteral* fun);
559 void CodeForReturnPosition(FunctionLiteral* fun);
560 void CodeForStatementPosition(Statement* node);
561 void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
562 void CodeForSourcePosition(int pos);
565 // True if the registers are valid for entry to a block.
566 bool HasValidEntryRegisters();
// Instance fields below; the 'private:' label and several member
// declarations (e.g. cc_reg_, loop_nesting_) are not visible in this dump.
569 List<DeferredCode*> deferred_;
572 MacroAssembler* masm_; // to generate code
574 CompilationInfo* info_;
576 // Code generation state
577 VirtualFrame* frame_;
578 RegisterAllocator* allocator_;
580 CodeGenState* state_;
583 Vector<TypeInfo>* type_info_;
586 BreakTarget function_return_;
588 // True if the function return is shadowed (ie, jumping to the target
589 // function_return_ does not jump to the true function return, but rather
590 // to some unlinking code).
591 bool function_return_is_shadowed_;
593 // Size of inlined write barriers generated by EmitNamedStore.
594 static int inlined_write_barrier_size_;
596 static InlineRuntimeLUT kInlineRuntimeLUT[];
598 friend class VirtualFrame;
599 friend class JumpTarget;
600 friend class Reference;
601 friend class FastCodeGenerator;
602 friend class FullCodeGenerator;
603 friend class FullCodeGenSyntaxChecker;
605 DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
609 // Compute a transcendental math function natively, or call the
610 // TranscendentalCache runtime function.
611 class TranscendentalCacheStub: public CodeStub {
613 explicit TranscendentalCacheStub(TranscendentalCache::Type type)
615 void Generate(MacroAssembler* masm);
617 TranscendentalCache::Type type_;
618 Major MajorKey() { return TranscendentalCache; }
619 int MinorKey() { return type_; }
620 Runtime::FunctionId RuntimeFunction();
// NOTE(review): extracted listing with original line numbers prefixed; the
// constructor parameter/initializer lists, several member declarations
// (op_, mode_, lhs_, rhs_, constant_rhs_, name_), closing braces and parts
// of the helper bodies below were dropped by the extraction. Code kept
// byte-identical; only comments added.
//
// Stub implementing the generic (non-inlined) path for JS binary operators
// (+, -, *, /, %, bitwise ops); may specialize on a known constant rhs.
624 class GenericBinaryOpStub : public CodeStub {
626 GenericBinaryOpStub(Token::Value op,
630 int constant_rhs = CodeGenerator::kUnknownIntValue)
635 constant_rhs_(constant_rhs),
636 specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
637 runtime_operands_type_(BinaryOpIC::DEFAULT),
// Second constructor: rebuilds a stub's configuration from its minor key.
640 GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
641 : op_(OpBits::decode(key)),
642 mode_(ModeBits::decode(key)),
643 lhs_(LhsRegister(RegisterBits::decode(key))),
644 rhs_(RhsRegister(RegisterBits::decode(key))),
645 constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
646 specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
647 runtime_operands_type_(type_info),
656 bool specialized_on_rhs_;
657 BinaryOpIC::TypeInfo runtime_operands_type_;
660 static const int kMaxKnownRhs = 0x40000000;
661 static const int kKnownRhsKeyBits = 6;
663 // Minor key encoding in 17 bits.
664 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
665 class OpBits: public BitField<Token::Value, 2, 6> {};
666 class TypeInfoBits: public BitField<int, 8, 2> {};
667 class RegisterBits: public BitField<bool, 10, 1> {};
668 class KnownIntBits: public BitField<int, 11, kKnownRhsKeyBits> {};
670 Major MajorKey() { return GenericBinaryOp; }
// MinorKey() body: lhs/rhs must be exactly the r0/r1 pair (either order).
672 ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
673 (lhs_.is(r1) && rhs_.is(r0)));
674 // Encode the parameters in a unique 18 bit value.
675 return OpBits::encode(op_)
676 | ModeBits::encode(mode_)
677 | KnownIntBits::encode(MinorKeyForKnownInt())
678 | TypeInfoBits::encode(runtime_operands_type_)
679 | RegisterBits::encode(lhs_.is(r0));
682 void Generate(MacroAssembler* masm);
683 void HandleNonSmiBitwiseOp(MacroAssembler* masm,
686 void HandleBinaryOpSlowCases(MacroAssembler* masm,
690 const Builtins::JavaScript& builtin);
691 void GenerateTypeTransition(MacroAssembler* masm);
// True when a known constant rhs makes a specialized stub worthwhile
// (small divisors for DIV, small or power-of-two divisors for MOD).
693 static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
694 if (constant_rhs == CodeGenerator::kUnknownIntValue) return false;
695 if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
696 if (op == Token::MOD) {
697 if (constant_rhs <= 1) return false;
698 if (constant_rhs <= 10) return true;
699 if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
// Maps the known rhs constant into the kKnownRhsKeyBits-wide key field.
// NOTE(review): the loop body and key initialization lines are missing
// from this dump.
705 int MinorKeyForKnownInt() {
706 if (!specialized_on_rhs_) return 0;
707 if (constant_rhs_ <= 10) return constant_rhs_ + 1;
708 ASSERT(IsPowerOf2(constant_rhs_));
710 int d = constant_rhs_;
711 while ((d & 1) == 0) {
715 ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits));
// Inverse of MinorKeyForKnownInt; body partially missing from this dump.
719 int KnownBitsForMinorKey(int key) {
721 if (key <= 11) return key - 1;
// Decode which of r0/r1 holds the lhs (and therefore the rhs).
730 Register LhsRegister(bool lhs_is_r0) {
731 return lhs_is_r0 ? r0 : r1;
734 Register RhsRegister(bool lhs_is_r0) {
735 return lhs_is_r0 ? r1 : r0;
// Smi fast path is skipped for unspecialized DIV/MOD and for operand
// types already known to be heap numbers or strings.
738 bool ShouldGenerateSmiCode() {
739 return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
740 runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
741 runtime_operands_type_ != BinaryOpIC::STRINGS;
744 bool ShouldGenerateFPCode() {
745 return runtime_operands_type_ != BinaryOpIC::STRINGS;
748 virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
750 virtual InlineCacheState GetICState() {
751 return BinaryOpIC::ToState(runtime_operands_type_);
754 const char* GetName();
// Debug-only Print(); the surrounding #ifdef DEBUG lines are missing.
758 if (!specialized_on_rhs_) {
759 PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
761 PrintF("GenericBinaryOpStub (%s by %d)\n",
// NOTE(review): extracted listing with original line numbers prefixed; the
// Register parameter lists of every helper below (and the closing braces)
// were dropped by the extraction. Code kept byte-identical; comments only.
//
// Static-only collection of string code generation helpers.
770 class StringHelper : public AllStatic {
772 // Generate code for copying characters using a simple loop. This should only
773 // be used in places where the number of characters is small and the
774 // additional setup and checking in GenerateCopyCharactersLong adds too much
775 // overhead. Copying of overlapping regions is not supported.
776 // Dest register ends at the position after the last character written.
777 static void GenerateCopyCharacters(MacroAssembler* masm,
784 // Generate code for copying a large number of characters. This function
785 // is allowed to spend extra time setting up conditions to make copying
786 // faster. Copying of overlapping regions is not supported.
787 // Dest register ends at the position after the last character written.
788 static void GenerateCopyCharactersLong(MacroAssembler* masm,
800 // Probe the symbol table for a two character string. If the string is
801 // not found by probing a jump to the label not_found is performed. This jump
802 // does not guarantee that the string is not in the symbol table. If the
803 // string is found the code falls through with the string in register r0.
804 // Contents of both c1 and c2 registers are modified. At the exit c1 is
805 // guaranteed to contain halfword with low and high bytes equal to
806 // initial contents of c1 and c2 respectively.
807 static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
817 // Generate string hash.
818 static void GenerateHashInit(MacroAssembler* masm,
822 static void GenerateHashAddCharacter(MacroAssembler* masm,
826 static void GenerateHashGetHash(MacroAssembler* masm,
830 DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
  NO_STRING_ADD_FLAGS = 0,
  NO_STRING_CHECK_IN_STUB = 1 << 0  // Omit string check in stub.
};
841 class StringAddStub: public CodeStub {
843 explicit StringAddStub(StringAddFlags flags) {
844 string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
848 Major MajorKey() { return StringAdd; }
849 int MinorKey() { return string_check_ ? 0 : 1; }
851 void Generate(MacroAssembler* masm);
853 // Should the stub check whether arguments are strings?
858 class SubStringStub: public CodeStub {
863 Major MajorKey() { return SubString; }
864 int MinorKey() { return 0; }
866 void Generate(MacroAssembler* masm);
871 class StringCompareStub: public CodeStub {
873 StringCompareStub() { }
875 // Compare two flat ASCII strings and returns result in r0.
876 // Does not use the stack.
877 static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
886 Major MajorKey() { return StringCompare; }
887 int MinorKey() { return 0; }
889 void Generate(MacroAssembler* masm);
// NOTE(review): extracted listing with original line numbers prefixed; the
// constructor's full parameter/initializer list, several member
// declarations (result_, lhs_, mask_bits_, scratch_), closing braces and
// the parameter lists of the private utilities were dropped by the
// extraction. Code kept byte-identical; comments only.
893 // This stub can do a fast mod operation without using fp.
894 // It is tail called from the GenericBinaryOpStub and it always
895 // returns an answer. It never causes GC so it doesn't need a real frame.
897 // The inputs are always positive Smis. This is never called
898 // where the denominator is a power of 2. We handle that separately.
900 // If we consider the denominator as an odd number multiplied by a power of 2,
902 // * The exponent (power of 2) is in the shift_distance register.
903 // * The odd number is in the odd_number register. It is always in the range
905 // * The bits from the numerator that are to be copied to the answer (there are
906 // shift_distance of them) are in the mask_bits register.
907 // * The other bits of the numerator have been shifted down and are in the lhs
909 class IntegerModStub : public CodeStub {
911 IntegerModStub(Register result,
912 Register shift_distance,
918 shift_distance_(shift_distance),
919 odd_number_(odd_number),
920 mask_bits_(mask_bits),
// Constructor body: hard-coded register assignments are asserted rather
// than encoded in the minor key, to keep the number of stub copies down.
923 // We don't code these in the minor key, so they should always be the same.
924 // We don't really want to fix that since this stub is rather large and we
925 // don't want many copies of it.
926 ASSERT(shift_distance_.is(r9));
927 ASSERT(odd_number_.is(r4));
928 ASSERT(mask_bits_.is(r3));
929 ASSERT(scratch_.is(r5));
934 Register shift_distance_;
935 Register odd_number_;
940 // Minor key encoding in 16 bits.
941 class ResultRegisterBits: public BitField<int, 0, 4> {};
942 class LhsRegisterBits: public BitField<int, 4, 4> {};
944 Major MajorKey() { return IntegerMod; }
// MinorKey() body:
946 // Encode the parameters in a unique 16 bit value.
947 return ResultRegisterBits::encode(result_.code())
948 | LhsRegisterBits::encode(lhs_.code());
951 void Generate(MacroAssembler* masm);
953 const char* GetName() { return "IntegerModStub"; }
955 // Utility functions.
956 void DigitSum(MacroAssembler* masm,
961 void DigitSum(MacroAssembler* masm,
968 void ModGetInRangeBySubtraction(MacroAssembler* masm,
972 void ModReduce(MacroAssembler* masm,
976 void ModAnswer(MacroAssembler* masm,
978 Register shift_distance,
980 Register sum_of_digits);
// Debug-only Print(); the surrounding #ifdef DEBUG lines are missing.
984 void Print() { PrintF("IntegerModStub\n"); }
989 // This stub can convert a signed int32 to a heap number (double). It does
990 // not work for int32s that are in Smi range! No GC occurs during this stub
991 // so you don't have to set up the frame.
992 class WriteInt32ToHeapNumberStub : public CodeStub {
994 WriteInt32ToHeapNumberStub(Register the_int,
995 Register the_heap_number,
998 the_heap_number_(the_heap_number),
999 scratch_(scratch) { }
1003 Register the_heap_number_;
1006 // Minor key encoding in 16 bits.
1007 class IntRegisterBits: public BitField<int, 0, 4> {};
1008 class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
1009 class ScratchRegisterBits: public BitField<int, 8, 4> {};
1011 Major MajorKey() { return WriteInt32ToHeapNumber; }
1013 // Encode the parameters in a unique 16 bit value.
1014 return IntRegisterBits::encode(the_int_.code())
1015 | HeapNumberRegisterBits::encode(the_heap_number_.code())
1016 | ScratchRegisterBits::encode(scratch_.code());
1019 void Generate(MacroAssembler* masm);
1021 const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
1024 void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
1029 class NumberToStringStub: public CodeStub {
1031 NumberToStringStub() { }
1033 // Generate code to do a lookup in the number string cache. If the number in
1034 // the register object is found in the cache the generated code falls through
1035 // with the result in the result register. The object and the result register
1036 // can be the same. If the number is not found in the cache the code jumps to
1037 // the label not_found with only the content of register object unchanged.
1038 static void GenerateLookupNumberStringCache(MacroAssembler* masm,
1048 Major MajorKey() { return NumberToString; }
1049 int MinorKey() { return 0; }
1051 void Generate(MacroAssembler* masm);
1053 const char* GetName() { return "NumberToStringStub"; }
1057 PrintF("NumberToStringStub\n");
1063 class RecordWriteStub : public CodeStub {
1065 RecordWriteStub(Register object, Register offset, Register scratch)
1066 : object_(object), offset_(offset), scratch_(scratch) { }
1068 void Generate(MacroAssembler* masm);
1077 PrintF("RecordWriteStub (object reg %d), (offset reg %d),"
1078 " (scratch reg %d)\n",
1079 object_.code(), offset_.code(), scratch_.code());
1083 // Minor key encoding in 12 bits. 4 bits for each of the three
1084 // registers (object, offset and scratch) OOOOAAAASSSS.
1085 class ScratchBits: public BitField<uint32_t, 0, 4> {};
1086 class OffsetBits: public BitField<uint32_t, 4, 4> {};
1087 class ObjectBits: public BitField<uint32_t, 8, 4> {};
1089 Major MajorKey() { return RecordWrite; }
1092 // Encode the registers.
1093 return ObjectBits::encode(object_.code()) |
1094 OffsetBits::encode(offset_.code()) |
1095 ScratchBits::encode(scratch_.code());
1100 } } // namespace v8::internal
1102 #endif // V8_ARM_CODEGEN_ARM_H_