1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
11 #include "src/base/build_config.h"
12 #include "src/base/logging.h"
13 #include "src/base/macros.h"
15 // Unfortunately, the INFINITY macro cannot be used with the '-pedantic'
16 // warning flag and certain versions of GCC due to a bug:
17 // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11931
18 // For now, we use the more involved template-based version from <limits>, but
19 // only when compiling with GCC versions affected by the bug (2.96.x - 4.0.x)
// V8_INFINITY: double positive infinity, selected per toolchain.
// NOTE(review): the #elif/#else/#endif lines of this cascade are elided in
// this excerpt — confirm the branch structure against the full header.
20 #if V8_CC_GNU && V8_GNUC_PREREQ(2, 96, 0) && !V8_GNUC_PREREQ(4, 1, 0)
21 # include <limits> // NOLINT
22 # define V8_INFINITY std::numeric_limits<double>::infinity()
24 # define V8_INFINITY HUGE_VAL
// NOTE(review): __builtin_inff() produces a *float* infinity; it widens to
// double at use sites. Presumably this branch targets a toolchain whose
// INFINITY/HUGE_VAL are unusable — confirm against the elided #elif guard.
26 #define V8_INFINITY (__builtin_inff())
28 # define V8_INFINITY INFINITY
// V8_TURBOFAN_BACKEND: 1 iff the target architecture has a TurboFan code
// generator (the architectures listed in the condition below).
31 #if V8_TARGET_ARCH_IA32 || (V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_32_BIT) || \
32 V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
33 V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC
34 #define V8_TURBOFAN_BACKEND 1
36 #define V8_TURBOFAN_BACKEND 0
// V8_TURBOFAN_TARGET: follows V8_TURBOFAN_BACKEND (the extra conditions, if
// any, are in lines elided from this excerpt).
38 #if V8_TURBOFAN_BACKEND
39 #define V8_TURBOFAN_TARGET 1
41 #define V8_TURBOFAN_TARGET 0
54 // Determine whether we are running in a simulated environment.
55 // Setting USE_SIMULATOR explicitly from the build script will force
56 // the use of a simulated environment.
57 #if !defined(USE_SIMULATOR)
// Each pair below enables the simulator when the compilation target
// architecture differs from the host architecture (cross builds).
// NOTE(review): the matching #endif lines are elided in this excerpt.
58 #if (V8_TARGET_ARCH_ARM64 && !V8_HOST_ARCH_ARM64)
59 #define USE_SIMULATOR 1
61 #if (V8_TARGET_ARCH_ARM && !V8_HOST_ARCH_ARM)
62 #define USE_SIMULATOR 1
64 #if (V8_TARGET_ARCH_PPC && !V8_HOST_ARCH_PPC)
65 #define USE_SIMULATOR 1
67 #if (V8_TARGET_ARCH_MIPS && !V8_HOST_ARCH_MIPS)
68 #define USE_SIMULATOR 1
70 #if (V8_TARGET_ARCH_MIPS64 && !V8_HOST_ARCH_MIPS64)
71 #define USE_SIMULATOR 1
75 // Determine whether the architecture uses an out-of-line constant pool.
76 #define V8_OOL_CONSTANT_POOL 0
// Default stack size (in KB) handed to generated code; smaller on ARM.
78 #ifdef V8_TARGET_ARCH_ARM
79 // Set stack limit lower for ARM than for other architectures because
80 // stack allocating MacroAssembler takes 120K bytes.
81 // See issue crbug.com/405338
82 #define V8_DEFAULT_STACK_SIZE_KB 864
84 // Slightly less than 1MB, since Windows' default stack size for
85 // the main execution thread is 1MB for both 32 and 64-bit.
86 #define V8_DEFAULT_STACK_SIZE_KB 984
90 // Determine whether double field unboxing feature is enabled.
// NOTE(review): both branches define the flag to 0 (feature disabled
// everywhere); the 64-bit split is kept so it can be flipped per-arch later.
91 #if V8_TARGET_ARCH_64_BIT
92 #define V8_DOUBLE_FIELDS_UNBOXING 0
94 #define V8_DOUBLE_FIELDS_UNBOXING 0
// Address is an untyped byte pointer used throughout the VM for raw
// machine addresses (code, heap slots, zap values below).
99 typedef byte* Address;
101 // -----------------------------------------------------------------------------
// Size units (KB is declared on a line elided from this excerpt).
105 const int MB = KB * KB;
106 const int GB = KB * KB * KB;
// Integer range limits for common widths. kMinInt is written as
// -kMaxInt - 1 because the literal -0x80000000 would overflow int.
107 const int kMaxInt = 0x7FFFFFFF;
108 const int kMinInt = -kMaxInt - 1;
109 const int kMaxInt8 = (1 << 7) - 1;
110 const int kMinInt8 = -(1 << 7);
111 const int kMaxUInt8 = (1 << 8) - 1;
112 const int kMinUInt8 = 0;
113 const int kMaxInt16 = (1 << 15) - 1;
114 const int kMinInt16 = -(1 << 15);
115 const int kMaxUInt16 = (1 << 16) - 1;
116 const int kMinUInt16 = 0;
118 const uint32_t kMaxUInt32 = 0xFFFFFFFFu;
// Byte sizes of fundamental types on the host, as named constants.
120 const int kCharSize = sizeof(char); // NOLINT
121 const int kShortSize = sizeof(short); // NOLINT
122 const int kIntSize = sizeof(int); // NOLINT
123 const int kInt32Size = sizeof(int32_t); // NOLINT
124 const int kInt64Size = sizeof(int64_t); // NOLINT
125 const int kDoubleSize = sizeof(double); // NOLINT
126 const int kIntptrSize = sizeof(intptr_t); // NOLINT
127 const int kPointerSize = sizeof(void*); // NOLINT
// General-purpose register width. On the x32 ABI (X64 with 32-bit
// pointers) registers are twice the pointer size; elsewhere they match it.
128 #if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
129 const int kRegisterSize = kPointerSize + kPointerSize;
131 const int kRegisterSize = kPointerSize;
// Sizes of the return address and saved frame pointer slots on the stack.
133 const int kPCOnStackSize = kRegisterSize;
134 const int kFPOnStackSize = kRegisterSize;
// log2(kDoubleSize): doubles are 8 bytes.
136 const int kDoubleSizeLog2 = 3;
// Pointer-size-dependent constants: log2 of pointer size, sign/all-bits
// masks, and the executable code-range configuration.
// NOTE(review): the #else/#endif lines separating the 64-bit and 32-bit
// halves are elided in this excerpt.
138 #if V8_HOST_ARCH_64_BIT
139 const int kPointerSizeLog2 = 3;
140 const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
141 const uintptr_t kUintptrAllBitsSet = V8_UINT64_C(0xFFFFFFFFFFFFFFFF);
// On 64-bit a contiguous code range is reserved so calls stay within
// near-jump distance.
142 const bool kRequiresCodeRange = true;
143 const size_t kMaximalCodeRangeSize = 512 * MB;
145 const size_t kMinimumCodeRangeSize = 4 * MB;
146 const size_t kReservedCodeRangePages = 1;
148 const size_t kMinimumCodeRangeSize = 3 * MB;
149 const size_t kReservedCodeRangePages = 0;
// 32-bit host: 4-byte pointers.
152 const int kPointerSizeLog2 = 2;
153 const intptr_t kIntptrSignBit = 0x80000000;
154 const uintptr_t kUintptrAllBitsSet = 0xFFFFFFFFu;
155 #if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
156 // x32 port also requires code range.
157 const bool kRequiresCodeRange = true;
158 const size_t kMaximalCodeRangeSize = 256 * MB;
159 const size_t kMinimumCodeRangeSize = 3 * MB;
160 const size_t kReservedCodeRangePages = 0;
162 const bool kRequiresCodeRange = false;
163 const size_t kMaximalCodeRangeSize = 0 * MB;
164 const size_t kMinimumCodeRangeSize = 0 * MB;
165 const size_t kReservedCodeRangePages = 0;
// Sanity check: kPointerSizeLog2 must agree with the actual pointer size.
169 STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2));
// Bit-width constants derived from the byte/pointer/int sizes above.
171 const int kBitsPerByte = 8;
172 const int kBitsPerByteLog2 = 3;
173 const int kBitsPerPointer = kPointerSize * kBitsPerByte;
174 const int kBitsPerInt = kIntSize * kBitsPerByte;
176 // IEEE 754 single precision floating point number bit layout.
177 const uint32_t kBinary32SignMask = 0x80000000u;
178 const uint32_t kBinary32ExponentMask = 0x7f800000u;
179 const uint32_t kBinary32MantissaMask = 0x007fffffu;
180 const int kBinary32ExponentBias = 127;
181 const int kBinary32MaxExponent = 0xFE;
182 const int kBinary32MinExponent = 0x01;
183 const int kBinary32MantissaBits = 23;
184 const int kBinary32ExponentShift = 23;
186 // Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
// (continuation of the above comment is elided in this excerpt.)
// 0xfff << 51 covers exactly the 12 bits 51..62 of a 64-bit double.
188 const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
190 // Latin1/UTF-16 constants
191 // Code-point values in Unicode 4.0 are 21 bits wide.
192 // Code units in UTF-16 are 16 bits wide.
// uc16: one UTF-16 code unit; uc32: a full Unicode code point.
193 typedef uint16_t uc16;
194 typedef int32_t uc32;
195 const int kOneByteSize = kCharSize;
196 const int kUC16Size = sizeof(uc16); // NOLINT
199 // Round up n to be a multiple of sz, where sz is a power of 2.
200 #define ROUND_UP(n, sz) (((n) + ((sz) - 1)) & ~((sz) - 1))
203 // FUNCTION_ADDR(f) gets the address of a C function f.
// The double cast (function pointer -> intptr_t -> Address) sidesteps the
// ban on converting function pointers directly to object pointers.
204 #define FUNCTION_ADDR(f) \
205 (reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(f)))
208 // FUNCTION_CAST<F>(addr) casts an address into a function
209 // of type F. Used to invoke generated code from within C.
// Inverse of FUNCTION_ADDR; F must be a function-pointer type.
210 template <typename F>
211 F FUNCTION_CAST(Address addr) {
212 return reinterpret_cast<F>(reinterpret_cast<intptr_t>(addr));
216 // -----------------------------------------------------------------------------
217 // Forward declarations for frequently used classes
218 // (sorted alphabetically)
220 class FreeStoreAllocationPolicy;
221 template <typename T, class P = FreeStoreAllocationPolicy> class List;
223 // -----------------------------------------------------------------------------
224 // Declarations for use in both the preparser and the rest of V8.
226 // The Strict Mode (ECMA-262 5th edition, 4.2.2).
229 // LanguageMode is expressed as a bitmask. Descriptions of the bits:
234 // Shorthands for some common language modes.
237 STRONG = STRICT_BIT | STRONG_BIT
// True when neither strictness bit is relevant: sloppy mode is the absence
// of STRICT_BIT. (Closing braces of these inline functions are elided in
// this excerpt.)
241 inline bool is_sloppy(LanguageMode language_mode) {
242 return (language_mode & STRICT_BIT) == 0;
// True for strict mode (and therefore also for strong mode, which includes
// STRICT_BIT per the LanguageMode shorthands above).
246 inline bool is_strict(LanguageMode language_mode) {
247 return language_mode & STRICT_BIT;
// True only when the STRONG_BIT is set.
251 inline bool is_strong(LanguageMode language_mode) {
252 return language_mode & STRONG_BIT;
// Validates a raw int against the three recognized mode bit patterns.
256 inline bool is_valid_language_mode(int language_mode) {
257 return language_mode == SLOPPY || language_mode == STRICT ||
258 language_mode == STRONG;
// Builds a LanguageMode from individual bits; DCHECKs the combination is
// one of the valid shorthands.
262 inline LanguageMode construct_language_mode(bool strict_bit, bool strong_bit) {
263 int language_mode = 0;
264 if (strict_bit) language_mode |= STRICT_BIT;
265 if (strong_bit) language_mode |= STRONG_BIT;
266 DCHECK(is_valid_language_mode(language_mode));
267 return static_cast<LanguageMode>(language_mode);
271 // Mask for the sign bit in a smi.
272 const intptr_t kSmiSignMask = kIntptrSignBit;
// Heap objects are aligned to the pointer size; mask = alignment - 1
// works because the alignment is a power of two.
274 const int kObjectAlignmentBits = kPointerSizeLog2;
275 const intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
276 const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
278 // Desired alignment for pointers.
279 const intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
280 const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
282 // Desired alignment for double values.
283 const intptr_t kDoubleAlignment = 8;
284 const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
286 // Desired alignment for generated code is 32 bytes (to improve cache line
// (continuation of the above comment is elided in this excerpt.)
288 const int kCodeAlignmentBits = 5;
289 const intptr_t kCodeAlignment = 1 << kCodeAlignmentBits;
290 const intptr_t kCodeAlignmentMask = kCodeAlignment - 1;
292 // The owner field of a page is tagged with the page header tag. We need that
293 // to find out if a slot is part of a large object. If we mask out the lower
294 // 0xfffff bits (1M pages), go to the owner offset, and see that this field
295 // is tagged with the page header tag, we can just look up the owner.
296 // Otherwise, we know that we are somewhere (not within the first 1M) in a
// (continuation of the above comment is elided in this excerpt.)
// Tag value 0b11 stored in the low kPageHeaderTagSize bits of the owner field.
298 const int kPageHeaderTag = 3;
299 const int kPageHeaderTagSize = 2;
300 const intptr_t kPageHeaderTagMask = (1 << kPageHeaderTagSize) - 1;
303 // Zap-value: The value used for zapping dead objects.
304 // Should be a recognizable hex value tagged as a failure.
// Distinct patterns per zap kind make the source of a stray value obvious
// in a crash dump. (The #else for the 32-bit variants is elided here.)
305 #ifdef V8_HOST_ARCH_64_BIT
306 const Address kZapValue =
307 reinterpret_cast<Address>(V8_UINT64_C(0xdeadbeedbeadbeef));
308 const Address kHandleZapValue =
309 reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddeaf));
310 const Address kGlobalHandleZapValue =
311 reinterpret_cast<Address>(V8_UINT64_C(0x1baffed00baffedf));
312 const Address kFromSpaceZapValue =
313 reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdaf));
314 const uint64_t kDebugZapValue = V8_UINT64_C(0xbadbaddbbadbaddb);
315 const uint64_t kSlotsZapValue = V8_UINT64_C(0xbeefdeadbeefdeef);
316 const uint64_t kFreeListZapValue = 0xfeed1eaffeed1eaf;
// 32-bit variants of the same zap patterns.
318 const Address kZapValue = reinterpret_cast<Address>(0xdeadbeef);
319 const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddeaf);
320 const Address kGlobalHandleZapValue = reinterpret_cast<Address>(0xbaffedf);
321 const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdaf);
322 const uint32_t kSlotsZapValue = 0xbeefdeef;
323 const uint32_t kDebugZapValue = 0xbadbaddb;
324 const uint32_t kFreeListZapValue = 0xfeed1eaf;
// Zap patterns shared by both pointer widths.
327 const int kCodeZapValue = 0xbadc0de;
328 const uint32_t kPhantomReferenceZap = 0xca11bac;
330 // On Intel architecture, cache line size is 64 bytes.
331 // On ARM it may be less (32 bytes), but as far as this constant is
332 // used for aligning data, it doesn't hurt to align on a greater value.
333 #define PROCESSOR_CACHE_LINE_SIZE 64
335 // Constants relevant to double precision floating point numbers.
336 // If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
// Same 12-bit mask as kQuietNaNMask, shifted into the upper word.
337 const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
340 // -----------------------------------------------------------------------------
341 // Forward declarations for frequently used classes
355 class DescriptorArray;
356 class TransitionArray;
357 class ExternalReference;
359 class FunctionTemplateInfo;
361 class SeededNumberDictionary;
362 class UnseededNumberDictionary;
363 class NameDictionary;
364 template <typename T> class MaybeHandle;
365 template <typename T> class Handle;
369 class InterceptorInfo;
375 class LargeObjectSpace;
376 class MacroAssembler;
379 class MarkCompactCollector;
388 template <typename Config, class Allocator = FreeStoreAllocationPolicy>
398 class MessageLocation;
400 typedef bool (*WeakSlotCallback)(Object** pointer);
402 typedef bool (*WeakSlotCallbackWithHeap)(Heap* heap, Object** pointer);
404 // -----------------------------------------------------------------------------
407 // NOTE: SpaceIterator depends on AllocationSpace enumeration values being
409 // Keep this enum in sync with the ObjectSpace enum in v8.h
// Heap spaces, ordered so FIRST_*/LAST_* aliases below form contiguous
// ranges. (The enum's closing brace is elided in this excerpt.)
410 enum AllocationSpace {
411 NEW_SPACE, // Semispaces collected with copying collector.
412 OLD_POINTER_SPACE, // May contain pointers to new space.
413 OLD_DATA_SPACE, // Must not have pointers to new space.
414 CODE_SPACE, // No pointers to new space, marked executable.
415 MAP_SPACE, // Only and all map objects.
416 CELL_SPACE, // Only and all cell objects.
417 PROPERTY_CELL_SPACE, // Only and all global property cell objects.
418 LO_SPACE, // Promoted large objects.
420 FIRST_SPACE = NEW_SPACE,
421 LAST_SPACE = LO_SPACE,
422 FIRST_PAGED_SPACE = OLD_POINTER_SPACE,
423 LAST_PAGED_SPACE = PROPERTY_CELL_SPACE
// 3 bits suffice to encode the 8 spaces above in a tagged word.
425 const int kSpaceTagSize = 3;
426 const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
429 // A flag that indicates whether objects should be pretenured when
430 // allocated (allocated directly into the old generation) or not
431 // (allocated in the young generation if the object size and type
433 enum PretenureFlag { NOT_TENURED, TENURED };
// Whether a hash-table-style container starts at its default or a
// caller-chosen capacity.
435 enum MinimumCapacity {
436 USE_DEFAULT_MINIMUM_CAPACITY,
437 USE_CUSTOM_MINIMUM_CAPACITY
// Which collector to run: the minor (scavenge) or major (mark-compact) GC.
440 enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };
442 enum Executability { NOT_EXECUTABLE, EXECUTABLE };
// Members of a visit-mode enum whose declaration line is elided in this
// excerpt — presumably VisitMode; confirm against the full header.
446 VISIT_ALL_IN_SCAVENGE,
447 VISIT_ALL_IN_SWEEP_NEWSPACE,
451 // Flag indicating whether code is built into the VM (one of the natives files).
452 enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE };
455 // A CodeDesc describes a buffer holding instructions and relocation
456 // information. The instructions start at the beginning of the buffer
457 // and grow forward, the relocation information starts at the end of
458 // the buffer and grows backward.
460 // |<--------------- buffer_size ---------------->|
461 // |<-- instr_size -->| |<-- reloc_size -->|
462 // +==================+========+==================+
463 // | instructions | free | reloc info |
464 // +==================+========+==================+
478 // Callback function used for iterating objects in heap spaces,
479 // for example, scanning heap objects.
480 typedef int (*HeapObjectCallback)(HeapObject* obj);
483 // Callback function used for checking constraints when copying/relocating
484 // objects. Returns true if an object can be copied/relocated from its
485 // old_addr to a new_addr.
486 typedef bool (*ConstraintCallback)(Address new_addr, Address old_addr);
489 // Callback function on inline caches, used for iterating over inline caches
491 typedef void (*InlineCacheCallback)(Code* code, Address ic);
494 // State for inline cache call sites. Aliased as IC::State.
495 enum InlineCacheState {
496 // Has never been executed.
498 // Has been executed but monomorphic state has been delayed.
500 // Has been executed and only one receiver type has been seen.
502 // Check failed due to prototype (or map deprecation).
504 // Multiple receiver types have been seen.
506 // Many receiver types have been seen.
508 // A generic handler is installed and no extra typefeedback is recorded.
510 // Special state for debug break or step in prepare stubs.
512 // Type-vector-based ICs have a default state, with the full calculation
513 // of IC state only determined by a look at the IC and the typevector
519 enum CallFunctionFlags {
520 NO_CALL_FUNCTION_FLAGS,
522 // Always wrap the receiver and call to the JSFunction. Only use this flag
523 // when both the receiver type and the target method are statically known.
528 enum CallConstructorFlags {
529 NO_CALL_CONSTRUCTOR_FLAGS = 0,
530 // The call target is cached in the instruction stream.
531 RECORD_CONSTRUCTOR_TARGET = 1,
532 SUPER_CONSTRUCTOR_CALL = 1 << 1,
533 SUPER_CALL_RECORD_TARGET = SUPER_CONSTRUCTOR_CALL | RECORD_CONSTRUCTOR_TARGET
537 enum CacheHolderFlag {
539 kCacheOnPrototypeReceiverIsDictionary,
540 kCacheOnPrototypeReceiverIsPrimitive,
545 // The Store Buffer (GC).
547 kStoreBufferFullEvent,
548 kStoreBufferStartScanningPagesEvent,
549 kStoreBufferScanningPageEvent
553 typedef void (*StoreBufferCallback)(Heap* heap,
555 StoreBufferEvent event);
558 // Union used for fast testing of specific double values.
559 union DoubleRepresentation {
562 DoubleRepresentation(double x) { value = x; }
563 bool operator==(const DoubleRepresentation& other) const {
564 return bits == other.bits;
569 // Union used for customized checking of the IEEE double types
570 // inlined within v8 runtime, rather than going to the underlying
571 // platform headers and libraries
572 union IeeeDoubleLittleEndianArchType {
575 unsigned int man_low :32;
576 unsigned int man_high :20;
577 unsigned int exp :11;
578 unsigned int sign :1;
583 union IeeeDoubleBigEndianArchType {
586 unsigned int sign :1;
587 unsigned int exp :11;
588 unsigned int man_high :20;
589 unsigned int man_low :32;
595 struct AccessorDescriptor {
596 Object* (*getter)(Isolate* isolate, Object* object, void* data);
598 Isolate* isolate, JSObject* object, Object* value, void* data);
603 // -----------------------------------------------------------------------------
// HAS_SMI_TAG(value): true when the low tag bits of a raw word match the
// smi tag, i.e. the value is a small integer rather than a heap pointer.
608 #define HAS_SMI_TAG(value) \
609 ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag)
611 // OBJECT_POINTER_ALIGN returns the value aligned as a HeapObject pointer
// Each of the three macros below rounds up to the next multiple of a
// power-of-two alignment via the add-mask-and-clear idiom.
612 #define OBJECT_POINTER_ALIGN(value) \
613 (((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)
615 // POINTER_SIZE_ALIGN returns the value aligned as a pointer.
616 #define POINTER_SIZE_ALIGN(value) \
617 (((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)
619 // CODE_POINTER_ALIGN returns the value aligned as a generated code segment.
620 #define CODE_POINTER_ALIGN(value) \
621 (((value) + kCodeAlignmentMask) & ~kCodeAlignmentMask)
623 // Support for tracking C++ memory allocation. Insert TRACK_MEMORY("Fisk")
624 // inside a C++ class and new and delete will be overloaded so logging is
626 // This file (globals.h) is included before log.h, so we use direct calls to
627 // the Logger rather than the LOG macro.
629 #define TRACK_MEMORY(name) \
630 void* operator new(size_t size) { \
631 void* result = ::operator new(size); \
632 Logger::NewEventStatic(name, result, size); \
635 void operator delete(void* object) { \
636 Logger::DeleteEventStatic(name, object); \
637 ::operator delete(object); \
640 #define TRACK_MEMORY(name)
644 // CPU feature flags.
660 MOVW_MOVT_IMMEDIATE_LOADS,
676 NUMBER_OF_CPU_FEATURES
680 // Used to specify if a macro instruction must perform a smi check on tagged
689 EVAL_SCOPE, // The top-level scope for an eval source.
690 FUNCTION_SCOPE, // The top-level scope for a function.
691 MODULE_SCOPE, // The scope introduced by a module literal
692 SCRIPT_SCOPE, // The top-level scope for a script or a top-level eval.
693 CATCH_SCOPE, // The scope introduced by catch.
694 BLOCK_SCOPE, // The scope introduced by a new block.
695 WITH_SCOPE, // The scope introduced by with.
696 ARROW_SCOPE // The top-level scope for an arrow function literal.
// The "hole" NaN: a NaN bit pattern (0xFFF7FFFF_FFF7FFFF) reserved to mark
// holes in double arrays, distinguishable from any NaN produced by
// arithmetic. Upper and lower words deliberately use the same pattern.
700 const uint32_t kHoleNanUpper32 = 0xFFF7FFFF;
701 const uint32_t kHoleNanLower32 = 0xFFF7FFFF;
// Full 64-bit hole pattern assembled from the two halves above.
703 const uint64_t kHoleNanInt64 =
704 (static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;
707 // The order of this enum has to be kept in sync with the predicates below.
709 // User declared variables:
710 VAR, // declared via 'var', and 'function' declarations
712 CONST_LEGACY, // declared via legacy 'const' declarations
714 LET, // declared via 'let' declarations
716 CONST, // declared via 'const' declarations
718 // Variables introduced by the compiler:
719 INTERNAL, // like VAR, but not user-visible (may or may not
722 TEMPORARY, // temporary variables (not user-visible), stack-allocated
723 // unless the scope as a whole has forced context allocation
725 DYNAMIC, // always require dynamic lookup (we don't know
728 DYNAMIC_GLOBAL, // requires dynamic lookup, but we know that the
729 // variable is global unless it has been shadowed
730 // by an eval-introduced variable
732 DYNAMIC_LOCAL // requires dynamic lookup, but we know that the
733 // variable is local and where it is unless it
734 // has been shadowed by an eval-introduced
// The range checks below rely on the declaration order of VariableMode
// (see the enum comment above). Closing braces of these inline functions
// are elided in this excerpt.
739 inline bool IsDynamicVariableMode(VariableMode mode) {
740 return mode >= DYNAMIC && mode <= DYNAMIC_LOCAL;
// User-declared modes: VAR through CONST.
744 inline bool IsDeclaredVariableMode(VariableMode mode) {
745 return mode >= VAR && mode <= CONST;
// Block-scoped (lexical) declarations: let and ES6 const.
749 inline bool IsLexicalVariableMode(VariableMode mode) {
750 return mode == LET || mode == CONST;
// Bindings that may not be reassigned: const in either flavor.
754 inline bool IsImmutableVariableMode(VariableMode mode) {
755 return mode == CONST || mode == CONST_LEGACY;
759 // ES6 Draft Rev3 10.2 specifies declarative environment records with mutable
760 // and immutable bindings that can be in two states: initialized and
761 // uninitialized. In ES5 only immutable bindings have these two states. When
762 // accessing a binding, it needs to be checked for initialization. However in
763 // the following cases the binding is initialized immediately after creation
764 // so the initialization check can always be skipped:
765 // 1. Var declared local variables.
767 // 2. A local variable introduced by a function declaration.
770 // function x(foo) {}
771 // 4. Catch bound variables.
772 // try {} catch (foo) {}
773 // 6. Function variables of named function expressions.
774 // var x = function foo() {}
775 // 7. Implicit binding of 'this'.
776 // 8. Implicit binding of 'arguments' in functions.
778 // ES5 specified object environment records which are introduced by ES elements
779 // such as Program and WithStatement that associate identifier bindings with the
780 // properties of some object. In the specification only mutable bindings exist
781 // (which may be non-writable) and have no distinct initialization step. However
782 // V8 allows const declarations in global code with distinct creation and
783 // initialization steps which are represented by non-writable properties in the
784 // global object. As a result also these bindings need to be checked for
787 // The following enum specifies a flag that indicates if the binding needs a
788 // distinct initialization step (kNeedsInitialization) or if the binding is
789 // immediately initialized upon creation (kCreatedInitialized).
790 enum InitializationFlag {
791 kNeedsInitialization,
796 enum MaybeAssignedFlag { kNotAssigned, kMaybeAssigned };
799 enum ClearExceptionFlag {
806 TREAT_MINUS_ZERO_AS_ZERO,
811 enum Signedness { kSigned, kUnsigned };
816 kArrowFunction = 1 << 0,
817 kGeneratorFunction = 1 << 1,
818 kConciseMethod = 1 << 2,
819 kConciseGeneratorMethod = kGeneratorFunction | kConciseMethod,
820 kAccessorFunction = 1 << 3,
821 kDefaultConstructor = 1 << 4,
822 kSubclassConstructor = 1 << 5,
823 kBaseConstructor = 1 << 6,
824 kDefaultBaseConstructor = kDefaultConstructor | kBaseConstructor,
825 kDefaultSubclassConstructor = kDefaultConstructor | kSubclassConstructor
// FunctionKind is a bitmask; this whitelists the meaningful combinations.
// (Closing braces of these inline functions are elided in this excerpt.)
829 inline bool IsValidFunctionKind(FunctionKind kind) {
830 return kind == FunctionKind::kNormalFunction ||
831 kind == FunctionKind::kArrowFunction ||
832 kind == FunctionKind::kGeneratorFunction ||
833 kind == FunctionKind::kConciseMethod ||
834 kind == FunctionKind::kConciseGeneratorMethod ||
835 kind == FunctionKind::kAccessorFunction ||
836 kind == FunctionKind::kDefaultBaseConstructor ||
837 kind == FunctionKind::kDefaultSubclassConstructor ||
838 kind == FunctionKind::kBaseConstructor ||
839 kind == FunctionKind::kSubclassConstructor;
// Each predicate below tests a single FunctionKind bit, after asserting
// the kind is one of the valid combinations.
843 inline bool IsArrowFunction(FunctionKind kind) {
844 DCHECK(IsValidFunctionKind(kind));
845 return kind & FunctionKind::kArrowFunction;
849 inline bool IsGeneratorFunction(FunctionKind kind) {
850 DCHECK(IsValidFunctionKind(kind));
851 return kind & FunctionKind::kGeneratorFunction;
855 inline bool IsConciseMethod(FunctionKind kind) {
856 DCHECK(IsValidFunctionKind(kind));
857 return kind & FunctionKind::kConciseMethod;
861 inline bool IsAccessorFunction(FunctionKind kind) {
862 DCHECK(IsValidFunctionKind(kind));
863 return kind & FunctionKind::kAccessorFunction;
867 inline bool IsDefaultConstructor(FunctionKind kind) {
868 DCHECK(IsValidFunctionKind(kind));
869 return kind & FunctionKind::kDefaultConstructor;
873 inline bool IsBaseConstructor(FunctionKind kind) {
874 DCHECK(IsValidFunctionKind(kind));
875 return kind & FunctionKind::kBaseConstructor;
879 inline bool IsSubclassConstructor(FunctionKind kind) {
880 DCHECK(IsValidFunctionKind(kind));
881 return kind & FunctionKind::kSubclassConstructor;
// True for any constructor kind: tests against the union of the three
// constructor bits. (The `return kind &` line is elided in this excerpt.)
885 inline bool IsConstructor(FunctionKind kind) {
886 DCHECK(IsValidFunctionKind(kind));
888 (FunctionKind::kBaseConstructor | FunctionKind::kSubclassConstructor |
889 FunctionKind::kDefaultConstructor);
891 } } // namespace v8::internal
893 namespace i = v8::internal;
895 #endif // V8_GLOBALS_H_