// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_STUB_CACHE_H_
#define V8_STUB_CACHE_H_

#include "src/macro-assembler.h"

namespace v8 {
namespace internal {
// The stub cache is used for megamorphic property accesses.
// It maps (map, name, type) to property access handlers. The cache does not
// need explicit invalidation when a prototype chain is modified, since the
// handlers verify the chain.
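//
// Conceptually (a sketch of the probe sequence, not the code itself): a
// megamorphic IC first probes the primary table at
// PrimaryOffset(name, flags, map); on a miss it probes the secondary table
// at SecondaryOffset(name, flags, seed), where the seed is the primary
// offset; only if both probes miss does the IC fall back to the runtime.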

class SCTableReference {
 public:
  Address address() const { return address_; }

 private:
  explicit SCTableReference(Address address) : address_(address) {}

  Address address_;

  friend class StubCache;
};

class StubCache {
 public:
  struct Entry {
    Name* key;
    Code* value;
    Map* map;
  };

  // Access cache for entry hash(name, map).
  Code* Set(Name* name, Map* map, Code* code);
  Code* Get(Name* name, Map* map, Code::Flags flags);
  // Clear the lookup table (at mark-compact collection time).
  void Clear();
  // Collect all maps that match the name and flags.
  void CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
                           Code::Flags flags, Handle<Context> native_context,
                           Zone* zone);

  // Generate code for probing the stub cache table.
  // Arguments extra, extra2 and extra3 may be used to pass additional scratch
  // registers. Set to no_reg if not needed.
  void GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
                     Code::Flags flags, Register receiver, Register name,
                     Register scratch, Register extra, Register extra2 = no_reg,
                     Register extra3 = no_reg);
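  //
  // A hypothetical call site, for illustration only (register assignments
  // are per-architecture; the real callers are the IC code generators):
  //
  //   stub_cache->GenerateProbe(masm, Code::LOAD_IC, flags,
  //                             receiver_reg, name_reg, scratch_reg,
  //                             extra_reg);
  //
  // On a hit the generated code jumps to the cached handler; on a miss it
  // falls through so the caller can emit the miss handling.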

  enum Table { kPrimary, kSecondary };

  SCTableReference key_reference(StubCache::Table table) {
    return SCTableReference(
        reinterpret_cast<Address>(&first_entry(table)->key));
  }

  SCTableReference map_reference(StubCache::Table table) {
    return SCTableReference(
        reinterpret_cast<Address>(&first_entry(table)->map));
  }

  SCTableReference value_reference(StubCache::Table table) {
    return SCTableReference(
        reinterpret_cast<Address>(&first_entry(table)->value));
  }

  StubCache::Entry* first_entry(StubCache::Table table) {
    switch (table) {
      case StubCache::kPrimary:
        return StubCache::primary_;
      case StubCache::kSecondary:
        return StubCache::secondary_;
    }
    UNREACHABLE();
    return NULL;
  }

  Isolate* isolate() { return isolate_; }

  // Setting the entry size such that the index is shifted by Name::kHashShift
  // is convenient; shifting down the length field (to extract the hash code)
  // automatically discards the hash bit field.
  static const int kCacheIndexShift = Name::kHashShift;

 private:
  explicit StubCache(Isolate* isolate);

  // The stub cache has a primary and secondary level. The two levels have
  // different hashing algorithms in order to avoid simultaneous collisions
  // in both caches. Unlike a probing strategy (quadratic or otherwise) the
  // update strategy is fairly clear and simple: any existing entry in the
  // primary cache is moved to the secondary cache, and secondary cache
  // entries are overwritten.
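  //
  // A minimal sketch of that policy (the real implementation lives in
  // stub-cache.cc and also normalizes the flags and checks for the "empty"
  // sentinel handler before evicting):
  //
  //   Code* Set(Name* name, Map* map, Code* code) {
  //     Entry* primary = entry(primary_, PrimaryOffset(name, flags, map));
  //     if (/* primary slot is occupied */) {
  //       // old_flags are taken from the handler being evicted.
  //       int seed = PrimaryOffset(primary->key, old_flags, primary->map);
  //       Entry* secondary = entry(
  //           secondary_, SecondaryOffset(primary->key, old_flags, seed));
  //       *secondary = *primary;  // Overwrite the secondary slot.
  //     }
  //     primary->key = name;
  //     primary->value = code;
  //     primary->map = map;
  //     return code;
  //   }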

  // Hash algorithm for the primary table. This algorithm is replicated in
  // assembler for every architecture. Returns an index into the table that
  // is scaled by 1 << kCacheIndexShift.
  static int PrimaryOffset(Name* name, Code::Flags flags, Map* map) {
    STATIC_ASSERT(kCacheIndexShift == Name::kHashShift);
    // Compute the hash of the name (use entire hash field).
    DCHECK(name->HasHashCode());
    uint32_t field = name->hash_field();
    // Using only the low bits in 64-bit mode is unlikely to increase the
    // risk of collision even if the heap is spread over an area larger than
    // 4Gb (and not at all if it isn't).
    uint32_t map_low32bits =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
    // We always set the in_loop bit to zero when generating the lookup code
    // so do it here too so the hash codes match.
    uint32_t iflags =
        (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
    // Base the offset on a simple combination of name, flags, and map.
    uint32_t key = (map_low32bits + field) ^ iflags;
    return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
  }
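
  // Worked example (a sketch; assumes Name::kHashShift == 2, so
  // kCacheIndexShift == 2): with kPrimaryTableSize == 2048, the mask is
  // (2047 << 2) == 0x1ffc, so the result is a multiple of 4 in the range
  // [0, 8188], i.e. an index pre-scaled by 1 << kCacheIndexShift, ready
  // for entry() below.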

  // Hash algorithm for the secondary table. This algorithm is replicated in
  // assembler for every architecture. Returns an index into the table that
  // is scaled by 1 << kCacheIndexShift.
  static int SecondaryOffset(Name* name, Code::Flags flags, int seed) {
    // Use the seed from the primary cache in the secondary cache.
    uint32_t name_low32bits =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
    // We always set the in_loop bit to zero when generating the lookup code
    // so do it here too so the hash codes match.
    uint32_t iflags =
        (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
    uint32_t key = (seed - name_low32bits) + iflags;
    return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
  }

  // Compute the entry for a given offset in exactly the same way as
  // we do in generated code. We generate a hash code that already
  // ends in Name::kHashShift 0s. Then we multiply it so it is a multiple
  // of sizeof(Entry). This makes it easier to avoid making mistakes
  // in the hashed offset computations.
  static Entry* entry(Entry* table, int offset) {
    const int multiplier = sizeof(*table) >> Name::kHashShift;
    return reinterpret_cast<Entry*>(reinterpret_cast<Address>(table) +
                                    offset * multiplier);
  }
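
  // Worked example (a sketch; assumes a 64-bit target, where sizeof(Entry)
  // is 24 bytes for three pointers, and Name::kHashShift == 2): multiplier
  // is 24 >> 2 == 6, so an offset of 8 (already a multiple of 1 << 2)
  // yields a byte displacement of 8 * 6 == 48, exactly two entries past
  // the table base.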

  static const int kPrimaryTableBits = 11;
  static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
  static const int kSecondaryTableBits = 9;
  static const int kSecondaryTableSize = (1 << kSecondaryTableBits);

  Entry primary_[kPrimaryTableSize];
  Entry secondary_[kSecondaryTableSize];
  Isolate* isolate_;

  friend class Isolate;
  friend class SCTableReference;

  DISALLOW_COPY_AND_ASSIGN(StubCache);
};

}  // namespace internal
}  // namespace v8

#endif  // V8_STUB_CACHE_H_