1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
11 #include "src/base/build_config.h"
12 #include "src/base/logging.h"
13 #include "src/base/macros.h"
15 // Unfortunately, the INFINITY macro cannot be used with the '-pedantic'
16 // warning flag and certain versions of GCC due to a bug:
17 // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11931
18 // For now, we use the more involved template-based version from <limits>, but
19 // only when compiling with GCC versions affected by the bug (2.96.x - 4.0.x)
20 #if V8_CC_GNU && V8_GNUC_PREREQ(2, 96, 0) && !V8_GNUC_PREREQ(4, 1, 0)
21 # include <limits> // NOLINT
22 # define V8_INFINITY std::numeric_limits<double>::infinity()
24 # define V8_INFINITY HUGE_VAL
26 #define V8_INFINITY (__builtin_inff())
28 # define V8_INFINITY INFINITY
31 #if V8_TARGET_ARCH_IA32 || (V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_32_BIT) || \
32 V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || \
33 V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC
34 #define V8_TURBOFAN_BACKEND 1
36 #define V8_TURBOFAN_BACKEND 0
38 #if V8_TURBOFAN_BACKEND
39 #define V8_TURBOFAN_TARGET 1
41 #define V8_TURBOFAN_TARGET 0
54 // Determine whether we are running in a simulated environment.
55 // Setting USE_SIMULATOR explicitly from the build script will force
56 // the use of a simulated environment.
57 #if !defined(USE_SIMULATOR)
58 #if (V8_TARGET_ARCH_ARM64 && !V8_HOST_ARCH_ARM64)
59 #define USE_SIMULATOR 1
61 #if (V8_TARGET_ARCH_ARM && !V8_HOST_ARCH_ARM)
62 #define USE_SIMULATOR 1
64 #if (V8_TARGET_ARCH_PPC && !V8_HOST_ARCH_PPC)
65 #define USE_SIMULATOR 1
67 #if (V8_TARGET_ARCH_MIPS && !V8_HOST_ARCH_MIPS)
68 #define USE_SIMULATOR 1
70 #if (V8_TARGET_ARCH_MIPS64 && !V8_HOST_ARCH_MIPS64)
71 #define USE_SIMULATOR 1
75 // Determine whether the architecture uses an out-of-line constant pool.
76 #define V8_OOL_CONSTANT_POOL 0
78 #ifdef V8_TARGET_ARCH_ARM
79 // Set stack limit lower for ARM than for other architectures because
80 // stack allocating MacroAssembler takes 120K bytes.
81 // See issue crbug.com/405338
82 #define V8_DEFAULT_STACK_SIZE_KB 864
84 // Slightly less than 1MB, since Windows' default stack size for
85 // the main execution thread is 1MB for both 32 and 64-bit.
86 #define V8_DEFAULT_STACK_SIZE_KB 984
90 // Determine whether double field unboxing feature is enabled.
91 #if V8_TARGET_ARCH_64_BIT
92 #define V8_DOUBLE_FIELDS_UNBOXING 1
94 #define V8_DOUBLE_FIELDS_UNBOXING 0
99 typedef byte* Address;
101 // -----------------------------------------------------------------------------
105 const int MB = KB * KB;
106 const int GB = KB * KB * KB;
107 const int kMaxInt = 0x7FFFFFFF;
108 const int kMinInt = -kMaxInt - 1;
109 const int kMaxInt8 = (1 << 7) - 1;
110 const int kMinInt8 = -(1 << 7);
111 const int kMaxUInt8 = (1 << 8) - 1;
112 const int kMinUInt8 = 0;
113 const int kMaxInt16 = (1 << 15) - 1;
114 const int kMinInt16 = -(1 << 15);
115 const int kMaxUInt16 = (1 << 16) - 1;
116 const int kMinUInt16 = 0;
118 const uint32_t kMaxUInt32 = 0xFFFFFFFFu;
120 const int kCharSize = sizeof(char); // NOLINT
121 const int kShortSize = sizeof(short); // NOLINT
122 const int kIntSize = sizeof(int); // NOLINT
123 const int kInt32Size = sizeof(int32_t); // NOLINT
124 const int kInt64Size = sizeof(int64_t); // NOLINT
125 const int kDoubleSize = sizeof(double); // NOLINT
126 const int kIntptrSize = sizeof(intptr_t); // NOLINT
127 const int kPointerSize = sizeof(void*); // NOLINT
128 #if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
129 const int kRegisterSize = kPointerSize + kPointerSize;
131 const int kRegisterSize = kPointerSize;
133 const int kPCOnStackSize = kRegisterSize;
134 const int kFPOnStackSize = kRegisterSize;
136 const int kDoubleSizeLog2 = 3;
138 #if V8_HOST_ARCH_64_BIT
139 const int kPointerSizeLog2 = 3;
140 const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
141 const uintptr_t kUintptrAllBitsSet = V8_UINT64_C(0xFFFFFFFFFFFFFFFF);
142 const bool kRequiresCodeRange = true;
143 const size_t kMaximalCodeRangeSize = 512 * MB;
145 const size_t kMinimumCodeRangeSize = 4 * MB;
146 const size_t kReservedCodeRangePages = 1;
148 const size_t kMinimumCodeRangeSize = 3 * MB;
149 const size_t kReservedCodeRangePages = 0;
152 const int kPointerSizeLog2 = 2;
153 const intptr_t kIntptrSignBit = 0x80000000;
154 const uintptr_t kUintptrAllBitsSet = 0xFFFFFFFFu;
155 #if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
156 // x32 port also requires code range.
157 const bool kRequiresCodeRange = true;
158 const size_t kMaximalCodeRangeSize = 256 * MB;
159 const size_t kMinimumCodeRangeSize = 3 * MB;
160 const size_t kReservedCodeRangePages = 0;
162 const bool kRequiresCodeRange = false;
163 const size_t kMaximalCodeRangeSize = 0 * MB;
164 const size_t kMinimumCodeRangeSize = 0 * MB;
165 const size_t kReservedCodeRangePages = 0;
169 STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2));
171 const int kBitsPerByte = 8;
172 const int kBitsPerByteLog2 = 3;
173 const int kBitsPerPointer = kPointerSize * kBitsPerByte;
174 const int kBitsPerInt = kIntSize * kBitsPerByte;
176 // IEEE 754 single precision floating point number bit layout.
177 const uint32_t kBinary32SignMask = 0x80000000u;
178 const uint32_t kBinary32ExponentMask = 0x7f800000u;
179 const uint32_t kBinary32MantissaMask = 0x007fffffu;
180 const int kBinary32ExponentBias = 127;
181 const int kBinary32MaxExponent = 0xFE;
182 const int kBinary32MinExponent = 0x01;
183 const int kBinary32MantissaBits = 23;
184 const int kBinary32ExponentShift = 23;
186 // Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
188 const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
190 // Latin1/UTF-16 constants
191 // Code-point values in Unicode 4.0 are 21 bits wide.
192 // Code units in UTF-16 are 16 bits wide.
193 typedef uint16_t uc16;
194 typedef int32_t uc32;
195 const int kOneByteSize = kCharSize;
196 const int kUC16Size = sizeof(uc16); // NOLINT
199 // Round up n to be a multiple of sz, where sz is a power of 2.
200 #define ROUND_UP(n, sz) (((n) + ((sz) - 1)) & ~((sz) - 1))
203 // FUNCTION_ADDR(f) gets the address of a C function f.
204 #define FUNCTION_ADDR(f) \
205 (reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(f)))
208 // FUNCTION_CAST<F>(addr) casts an address into a function
209 // of type F. Used to invoke generated code from within C.
210 template <typename F>
211 F FUNCTION_CAST(Address addr) {
212 return reinterpret_cast<F>(reinterpret_cast<intptr_t>(addr));
216 // -----------------------------------------------------------------------------
217 // Forward declarations for frequently used classes
218 // (sorted alphabetically)
220 class FreeStoreAllocationPolicy;
221 template <typename T, class P = FreeStoreAllocationPolicy> class List;
223 // -----------------------------------------------------------------------------
224 // Declarations for use in both the preparser and the rest of V8.
226 // The Strict Mode (ECMA-262 5th edition, 4.2.2).
229 // LanguageMode is expressed as a bitmask. Descriptions of the bits:
234 // Shorthands for some common language modes.
237 STRONG = STRICT_BIT | STRONG_BIT
241 inline bool is_sloppy(LanguageMode language_mode) {
242 return (language_mode & STRICT_BIT) == 0;
246 inline bool is_strict(LanguageMode language_mode) {
247 return language_mode & STRICT_BIT;
251 inline bool is_strong(LanguageMode language_mode) {
252 return language_mode & STRONG_BIT;
256 inline bool is_valid_language_mode(int language_mode) {
257 return language_mode == SLOPPY || language_mode == STRICT ||
258 language_mode == STRONG;
262 inline LanguageMode construct_language_mode(bool strict_bit, bool strong_bit) {
263 int language_mode = 0;
264 if (strict_bit) language_mode |= STRICT_BIT;
265 if (strong_bit) language_mode |= STRONG_BIT;
266 DCHECK(is_valid_language_mode(language_mode));
267 return static_cast<LanguageMode>(language_mode);
271 // Mask for the sign bit in a smi.
272 const intptr_t kSmiSignMask = kIntptrSignBit;
274 const int kObjectAlignmentBits = kPointerSizeLog2;
275 const intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
276 const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
278 // Desired alignment for pointers.
279 const intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
280 const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
282 // Desired alignment for double values.
283 const intptr_t kDoubleAlignment = 8;
284 const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
286 // Desired alignment for generated code is 32 bytes (to improve cache line
288 const int kCodeAlignmentBits = 5;
289 const intptr_t kCodeAlignment = 1 << kCodeAlignmentBits;
290 const intptr_t kCodeAlignmentMask = kCodeAlignment - 1;
292 // The owner field of a page is tagged with the page header tag. We need that
293 // to find out if a slot is part of a large object. If we mask out the lower
294 // 0xfffff bits (1M pages), go to the owner offset, and see that this field
295 // is tagged with the page header tag, we can just look up the owner.
296 // Otherwise, we know that we are somewhere (not within the first 1M) in a
298 const int kPageHeaderTag = 3;
299 const int kPageHeaderTagSize = 2;
300 const intptr_t kPageHeaderTagMask = (1 << kPageHeaderTagSize) - 1;
303 // Zap-value: The value used for zapping dead objects.
304 // Should be a recognizable hex value tagged as a failure.
305 #ifdef V8_HOST_ARCH_64_BIT
306 const Address kZapValue =
307 reinterpret_cast<Address>(V8_UINT64_C(0xdeadbeedbeadbeef));
308 const Address kHandleZapValue =
309 reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddeaf));
310 const Address kGlobalHandleZapValue =
311 reinterpret_cast<Address>(V8_UINT64_C(0x1baffed00baffedf));
312 const Address kFromSpaceZapValue =
313 reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdaf));
314 const uint64_t kDebugZapValue = V8_UINT64_C(0xbadbaddbbadbaddb);
315 const uint64_t kSlotsZapValue = V8_UINT64_C(0xbeefdeadbeefdeef);
316 const uint64_t kFreeListZapValue = 0xfeed1eaffeed1eaf;
318 const Address kZapValue = reinterpret_cast<Address>(0xdeadbeef);
319 const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddeaf);
320 const Address kGlobalHandleZapValue = reinterpret_cast<Address>(0xbaffedf);
321 const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdaf);
322 const uint32_t kSlotsZapValue = 0xbeefdeef;
323 const uint32_t kDebugZapValue = 0xbadbaddb;
324 const uint32_t kFreeListZapValue = 0xfeed1eaf;
327 const int kCodeZapValue = 0xbadc0de;
328 const uint32_t kPhantomReferenceZap = 0xca11bac;
330 // On Intel architecture, cache line size is 64 bytes.
331 // On ARM it may be less (32 bytes), but as far this constant is
332 // used for aligning data, it doesn't hurt to align on a greater value.
333 #define PROCESSOR_CACHE_LINE_SIZE 64
335 // Constants relevant to double precision floating point numbers.
336 // If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
337 const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
340 // -----------------------------------------------------------------------------
341 // Forward declarations for frequently used classes
355 class DescriptorArray;
356 class TransitionArray;
357 class ExternalReference;
359 class FunctionTemplateInfo;
361 class SeededNumberDictionary;
362 class UnseededNumberDictionary;
363 class NameDictionary;
364 template <typename T> class MaybeHandle;
365 template <typename T> class Handle;
369 class InterceptorInfo;
375 class LargeObjectSpace;
376 class MacroAssembler;
379 class MarkCompactCollector;
388 template <typename Config, class Allocator = FreeStoreAllocationPolicy>
398 class MessageLocation;
400 typedef bool (*WeakSlotCallback)(Object** pointer);
402 typedef bool (*WeakSlotCallbackWithHeap)(Heap* heap, Object** pointer);
404 // -----------------------------------------------------------------------------
407 // NOTE: SpaceIterator depends on AllocationSpace enumeration values being
409 // Keep this enum in sync with the ObjectSpace enum in v8.h
// Heap spaces an object can be allocated in.
// NOTE: SpaceIterator depends on AllocationSpace enumeration values being
// consecutive. Keep this enum in sync with the ObjectSpace enum in v8.h.
enum AllocationSpace {
  NEW_SPACE,          // Semispaces collected with copying collector.
  OLD_POINTER_SPACE,  // May contain pointers to new space.
  OLD_DATA_SPACE,     // Must not have pointers to new space.
  CODE_SPACE,         // No pointers to new space, marked executable.
  MAP_SPACE,          // Only and all map objects.
  CELL_SPACE,         // Only and all cell objects.
  LO_SPACE,           // Promoted large objects.

  FIRST_SPACE = NEW_SPACE,
  LAST_SPACE = LO_SPACE,
  FIRST_PAGED_SPACE = OLD_POINTER_SPACE,
  LAST_PAGED_SPACE = CELL_SPACE
};
424 const int kSpaceTagSize = 3;
425 const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
428 // A flag that indicates whether objects should be pretenured when
429 // allocated (allocated directly into the old generation) or not
430 // (allocated in the young generation if the object size and type
432 enum PretenureFlag { NOT_TENURED, TENURED };
434 enum MinimumCapacity {
435 USE_DEFAULT_MINIMUM_CAPACITY,
436 USE_CUSTOM_MINIMUM_CAPACITY
439 enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };
441 enum Executability { NOT_EXECUTABLE, EXECUTABLE };
445 VISIT_ALL_IN_SCAVENGE,
446 VISIT_ALL_IN_SWEEP_NEWSPACE,
450 // Flag indicating whether code is built into the VM (one of the natives files).
451 enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE };
454 // ParseRestriction is used to restrict the set of valid statements in a
455 // unit of compilation. Restriction violations cause a syntax error.
456 enum ParseRestriction {
457 NO_PARSE_RESTRICTION, // All expressions are allowed.
458 ONLY_SINGLE_FUNCTION_LITERAL // Only a single FunctionLiteral expression.
461 // A CodeDesc describes a buffer holding instructions and relocation
462 // information. The instructions start at the beginning of the buffer
463 // and grow forward, the relocation information starts at the end of
464 // the buffer and grows backward.
466 // |<--------------- buffer_size ---------------->|
467 // |<-- instr_size -->| |<-- reloc_size -->|
468 // +==================+========+==================+
469 // | instructions | free | reloc info |
470 // +==================+========+==================+
484 // Callback function used for iterating objects in heap spaces,
485 // for example, scanning heap objects.
486 typedef int (*HeapObjectCallback)(HeapObject* obj);
489 // Callback function used for checking constraints when copying/relocating
490 // objects. Returns true if an object can be copied/relocated from its
491 // old_addr to a new_addr.
492 typedef bool (*ConstraintCallback)(Address new_addr, Address old_addr);
495 // Callback function on inline caches, used for iterating over inline caches
497 typedef void (*InlineCacheCallback)(Code* code, Address ic);
500 // State for inline cache call sites. Aliased as IC::State.
501 enum InlineCacheState {
502 // Has never been executed.
504 // Has been executed but monomorhic state has been delayed.
506 // Has been executed and only one receiver type has been seen.
508 // Check failed due to prototype (or map deprecation).
510 // Multiple receiver types have been seen.
512 // Many receiver types have been seen.
514 // A generic handler is installed and no extra typefeedback is recorded.
516 // Special state for debug break or step in prepare stubs.
518 // Type-vector-based ICs have a default state, with the full calculation
519 // of IC state only determined by a look at the IC and the typevector
525 enum CallFunctionFlags {
526 NO_CALL_FUNCTION_FLAGS,
528 // Always wrap the receiver and call to the JSFunction. Only use this flag
529 // both the receiver type and the target method are statically known.
534 enum CallConstructorFlags {
535 NO_CALL_CONSTRUCTOR_FLAGS = 0,
536 // The call target is cached in the instruction stream.
537 RECORD_CONSTRUCTOR_TARGET = 1,
538 SUPER_CONSTRUCTOR_CALL = 1 << 1,
539 SUPER_CALL_RECORD_TARGET = SUPER_CONSTRUCTOR_CALL | RECORD_CONSTRUCTOR_TARGET
543 enum CacheHolderFlag {
545 kCacheOnPrototypeReceiverIsDictionary,
546 kCacheOnPrototypeReceiverIsPrimitive,
551 // The Store Buffer (GC).
553 kStoreBufferFullEvent,
554 kStoreBufferStartScanningPagesEvent,
555 kStoreBufferScanningPageEvent
559 typedef void (*StoreBufferCallback)(Heap* heap,
561 StoreBufferEvent event);
564 // Union used for fast testing of specific double values.
// Union used for fast testing of specific double values: two doubles
// compare equal iff their raw 64-bit encodings match (so NaN payloads
// and the sign of zero are significant, unlike operator== on double).
union DoubleRepresentation {
  double value;   // The double being inspected.
  int64_t bits;   // Its raw IEEE 754 bit pattern.
  DoubleRepresentation(double x) { value = x; }
  bool operator==(const DoubleRepresentation& other) const {
    return bits == other.bits;
  }
};
575 // Union used for customized checking of the IEEE double types
576 // inlined within v8 runtime, rather than going to the underlying
577 // platform headers and libraries
// Union used for customized checking of the IEEE double types inlined
// within v8 runtime, rather than going to the underlying platform
// headers and libraries. Field layout is valid on little-endian hosts
// only (low mantissa word first, sign bit last).
union IeeeDoubleLittleEndianArchType {
  double d;
  struct {
    unsigned int man_low  :32;  // Low 32 bits of the mantissa.
    unsigned int man_high :20;  // High 20 bits of the mantissa.
    unsigned int exp      :11;  // Biased exponent (bias 1023).
    unsigned int sign     :1;   // Sign bit.
  } bits;
};
// Big-endian counterpart of IeeeDoubleLittleEndianArchType: the same
// IEEE 754 fields declared in reverse order (sign bit first) so the
// bit-field overlay matches a big-endian double's byte order.
union IeeeDoubleBigEndianArchType {
  double d;
  struct {
    unsigned int sign     :1;   // Sign bit.
    unsigned int exp      :11;  // Biased exponent (bias 1023).
    unsigned int man_high :20;  // High 20 bits of the mantissa.
    unsigned int man_low  :32;  // Low 32 bits of the mantissa.
  } bits;
};
601 struct AccessorDescriptor {
602 Object* (*getter)(Isolate* isolate, Object* object, void* data);
604 Isolate* isolate, JSObject* object, Object* value, void* data);
609 // -----------------------------------------------------------------------------
614 #define HAS_SMI_TAG(value) \
615 ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag)
617 // OBJECT_POINTER_ALIGN returns the value aligned as a HeapObject pointer
618 #define OBJECT_POINTER_ALIGN(value) \
619 (((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)
621 // POINTER_SIZE_ALIGN returns the value aligned as a pointer.
622 #define POINTER_SIZE_ALIGN(value) \
623 (((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)
625 // CODE_POINTER_ALIGN returns the value aligned as a generated code segment.
626 #define CODE_POINTER_ALIGN(value) \
627 (((value) + kCodeAlignmentMask) & ~kCodeAlignmentMask)
629 // Support for tracking C++ memory allocation. Insert TRACK_MEMORY("Fisk")
630 // inside a C++ class and new and delete will be overloaded so logging is
632 // This file (globals.h) is included before log.h, so we use direct calls to
633 // the Logger rather than the LOG macro.
// Overloads new/delete for the enclosing class so every allocation and
// deallocation is reported to the Logger under the given name. This
// file (globals.h) is included before log.h, so we use direct calls to
// the Logger rather than the LOG macro.
#define TRACK_MEMORY(name) \
  void* operator new(size_t size) { \
    void* result = ::operator new(size); \
    Logger::NewEventStatic(name, result, size); \
    return result; \
  } \
  void operator delete(void* object) { \
    Logger::DeleteEventStatic(name, object); \
    ::operator delete(object); \
  }
646 #define TRACK_MEMORY(name)
650 // CPU feature flags.
666 MOVW_MOVT_IMMEDIATE_LOADS,
682 NUMBER_OF_CPU_FEATURES
686 // Used to specify if a macro instruction must perform a smi check on tagged
695 EVAL_SCOPE, // The top-level scope for an eval source.
696 FUNCTION_SCOPE, // The top-level scope for a function.
697 MODULE_SCOPE, // The scope introduced by a module literal
698 SCRIPT_SCOPE, // The top-level scope for a script or a top-level eval.
699 CATCH_SCOPE, // The scope introduced by catch.
700 BLOCK_SCOPE, // The scope introduced by a new block.
701 WITH_SCOPE, // The scope introduced by with.
702 ARROW_SCOPE // The top-level scope for an arrow function literal.
705 // The mips architecture prior to revision 5 has inverted encoding for sNaN.
706 #if (V8_TARGET_ARCH_MIPS && !defined(_MIPS_ARCH_MIPS32R6)) || \
707 (V8_TARGET_ARCH_MIPS64 && !defined(_MIPS_ARCH_MIPS64R6))
708 const uint32_t kHoleNanUpper32 = 0xFFFF7FFF;
709 const uint32_t kHoleNanLower32 = 0xFFFF7FFF;
711 const uint32_t kHoleNanUpper32 = 0xFFF7FFFF;
712 const uint32_t kHoleNanLower32 = 0xFFF7FFFF;
715 const uint64_t kHoleNanInt64 =
716 (static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;
719 // The order of this enum has to be kept in sync with the predicates below.
721 // User declared variables:
722 VAR, // declared via 'var', and 'function' declarations
724 CONST_LEGACY, // declared via legacy 'const' declarations
726 LET, // declared via 'let' declarations (first lexical)
728 CONST, // declared via 'const' declarations
730 IMPORT, // declared via 'import' declarations (last lexical)
732 // Variables introduced by the compiler:
733 INTERNAL, // like VAR, but not user-visible (may or may not
736 TEMPORARY, // temporary variables (not user-visible), stack-allocated
737 // unless the scope as a whole has forced context allocation
739 DYNAMIC, // always require dynamic lookup (we don't know
742 DYNAMIC_GLOBAL, // requires dynamic lookup, but we know that the
743 // variable is global unless it has been shadowed
744 // by an eval-introduced variable
746 DYNAMIC_LOCAL // requires dynamic lookup, but we know that the
747 // variable is local and where it is unless it
748 // has been shadowed by an eval-introduced
753 inline bool IsDynamicVariableMode(VariableMode mode) {
754 return mode >= DYNAMIC && mode <= DYNAMIC_LOCAL;
758 inline bool IsDeclaredVariableMode(VariableMode mode) {
759 return mode >= VAR && mode <= IMPORT;
763 inline bool IsLexicalVariableMode(VariableMode mode) {
764 return mode >= LET && mode <= IMPORT;
768 inline bool IsImmutableVariableMode(VariableMode mode) {
769 return mode == CONST || mode == CONST_LEGACY || mode == IMPORT;
773 // ES6 Draft Rev3 10.2 specifies declarative environment records with mutable
774 // and immutable bindings that can be in two states: initialized and
775 // uninitialized. In ES5 only immutable bindings have these two states. When
776 // accessing a binding, it needs to be checked for initialization. However in
777 // the following cases the binding is initialized immediately after creation
778 // so the initialization check can always be skipped:
779 // 1. Var declared local variables.
781 // 2. A local variable introduced by a function declaration.
784 // function x(foo) {}
785 // 4. Catch bound variables.
786 // try {} catch (foo) {}
787 // 6. Function variables of named function expressions.
788 // var x = function foo() {}
789 // 7. Implicit binding of 'this'.
790 // 8. Implicit binding of 'arguments' in functions.
792 // ES5 specified object environment records which are introduced by ES elements
793 // such as Program and WithStatement that associate identifier bindings with the
794 // properties of some object. In the specification only mutable bindings exist
795 // (which may be non-writable) and have no distinct initialization step. However
796 // V8 allows const declarations in global code with distinct creation and
797 // initialization steps which are represented by non-writable properties in the
798 // global object. As a result also these bindings need to be checked for
801 // The following enum specifies a flag that indicates if the binding needs a
802 // distinct initialization step (kNeedsInitialization) or if the binding is
803 // immediately initialized upon creation (kCreatedInitialized).
804 enum InitializationFlag {
805 kNeedsInitialization,
810 enum MaybeAssignedFlag { kNotAssigned, kMaybeAssigned };
813 // Serialized in PreparseData, so numeric values should not be changed.
814 enum ParseErrorType { kSyntaxError = 0, kReferenceError = 1 };
817 enum ClearExceptionFlag {
824 TREAT_MINUS_ZERO_AS_ZERO,
829 enum Signedness { kSigned, kUnsigned };
834 kArrowFunction = 1 << 0,
835 kGeneratorFunction = 1 << 1,
836 kConciseMethod = 1 << 2,
837 kConciseGeneratorMethod = kGeneratorFunction | kConciseMethod,
838 kAccessorFunction = 1 << 3,
839 kDefaultConstructor = 1 << 4,
840 kSubclassConstructor = 1 << 5,
841 kBaseConstructor = 1 << 6,
842 kInObjectLiteral = 1 << 7,
843 kDefaultBaseConstructor = kDefaultConstructor | kBaseConstructor,
844 kDefaultSubclassConstructor = kDefaultConstructor | kSubclassConstructor,
845 kConciseMethodInObjectLiteral = kConciseMethod | kInObjectLiteral,
846 kConciseGeneratorMethodInObjectLiteral =
847 kConciseGeneratorMethod | kInObjectLiteral,
848 kAccessorFunctionInObjectLiteral = kAccessorFunction | kInObjectLiteral,
852 inline bool IsValidFunctionKind(FunctionKind kind) {
853 return kind == FunctionKind::kNormalFunction ||
854 kind == FunctionKind::kArrowFunction ||
855 kind == FunctionKind::kGeneratorFunction ||
856 kind == FunctionKind::kConciseMethod ||
857 kind == FunctionKind::kConciseGeneratorMethod ||
858 kind == FunctionKind::kAccessorFunction ||
859 kind == FunctionKind::kDefaultBaseConstructor ||
860 kind == FunctionKind::kDefaultSubclassConstructor ||
861 kind == FunctionKind::kBaseConstructor ||
862 kind == FunctionKind::kSubclassConstructor ||
863 kind == FunctionKind::kConciseMethodInObjectLiteral ||
864 kind == FunctionKind::kConciseGeneratorMethodInObjectLiteral ||
865 kind == FunctionKind::kAccessorFunctionInObjectLiteral;
869 inline bool IsArrowFunction(FunctionKind kind) {
870 DCHECK(IsValidFunctionKind(kind));
871 return kind & FunctionKind::kArrowFunction;
875 inline bool IsGeneratorFunction(FunctionKind kind) {
876 DCHECK(IsValidFunctionKind(kind));
877 return kind & FunctionKind::kGeneratorFunction;
881 inline bool IsConciseMethod(FunctionKind kind) {
882 DCHECK(IsValidFunctionKind(kind));
883 return kind & FunctionKind::kConciseMethod;
887 inline bool IsAccessorFunction(FunctionKind kind) {
888 DCHECK(IsValidFunctionKind(kind));
889 return kind & FunctionKind::kAccessorFunction;
893 inline bool IsDefaultConstructor(FunctionKind kind) {
894 DCHECK(IsValidFunctionKind(kind));
895 return kind & FunctionKind::kDefaultConstructor;
899 inline bool IsBaseConstructor(FunctionKind kind) {
900 DCHECK(IsValidFunctionKind(kind));
901 return kind & FunctionKind::kBaseConstructor;
905 inline bool IsSubclassConstructor(FunctionKind kind) {
906 DCHECK(IsValidFunctionKind(kind));
907 return kind & FunctionKind::kSubclassConstructor;
911 inline bool IsConstructor(FunctionKind kind) {
912 DCHECK(IsValidFunctionKind(kind));
914 (FunctionKind::kBaseConstructor | FunctionKind::kSubclassConstructor |
915 FunctionKind::kDefaultConstructor);
919 inline bool IsInObjectLiteral(FunctionKind kind) {
920 DCHECK(IsValidFunctionKind(kind));
921 return kind & FunctionKind::kInObjectLiteral;
925 inline FunctionKind WithObjectLiteralBit(FunctionKind kind) {
926 kind = static_cast<FunctionKind>(kind | FunctionKind::kInObjectLiteral);
927 DCHECK(IsValidFunctionKind(kind));
930 } } // namespace v8::internal
932 namespace i = v8::internal;
934 #endif // V8_GLOBALS_H_