From cfbc1eed9c8e882d324aaf82a2ced977fe11ba30 Mon Sep 17 00:00:00 2001
From: "kaznacheev@chromium.org"
Date: Fri, 24 Sep 2010 08:18:33 +0000
Subject: [PATCH] Prevent modification of cached normalized maps.

Finally solves the problem that r5342 attempted to solve.

When adding a stub to a map's code cache we need to make sure that this
map is not used by objects that do not need this stub.

The existing solution had two flaws:
1. It checked that the map is cached by asking the current context. If the
   object escaped into another context, NormalizedMapCache::Contains
   returns a false negative.
2. If a map gets evicted from the cache, Contains returns false, yet the
   map may still be shared and must not be modified.

This patch implements a much less fragile solution to the same problem:
a map now has a flag (is_shared) that is set once the map is added to a
cache, stays set even after cache eviction, and is cleared if the object
goes back to fast mode.

Added a regression test.

Review URL: http://codereview.chromium.org/3472006

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@5518 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---
 src/objects-debug.cc    |  5 +--
 src/objects-inl.h       | 13 ++++++++
 src/objects.cc          | 85 +++++++++++++++++++------------------------------
 src/objects.h           | 25 +++++++++++----
 test/cctest/test-api.cc | 43 +++++++++++++++++++++++++
 5 files changed, 110 insertions(+), 61 deletions(-)

diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 6d49d75..078b23f 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -649,8 +649,9 @@ void Map::MapVerify() {
 }
 
 
-void Map::NormalizedMapVerify() {
+void Map::SharedMapVerify() {
   MapVerify();
+  ASSERT(is_shared());
   ASSERT_EQ(Heap::empty_descriptor_array(), instance_descriptors());
   ASSERT_EQ(Heap::empty_fixed_array(), code_cache());
   ASSERT_EQ(0, pre_allocated_property_fields());
@@ -1381,7 +1382,7 @@ void NormalizedMapCache::NormalizedMapCacheVerify() {
   for (int i = 0; i < length(); i++) {
     Object* e = get(i);
     if (e->IsMap()) {
-      Map::cast(e)->NormalizedMapVerify();
+      Map::cast(e)->SharedMapVerify();
     } else {
       ASSERT(e->IsUndefined());
     }
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 93f752f..2bbdeaa 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -2292,6 +2292,19 @@ bool Map::attached_to_shared_function_info() {
 }
 
 
+void Map::set_is_shared(bool value) {
+  if (value) {
+    set_bit_field2(bit_field2() | (1 << kIsShared));
+  } else {
+    set_bit_field2(bit_field2() & ~(1 << kIsShared));
+  }
+}
+
+bool Map::is_shared() {
+  return ((1 << kIsShared) & bit_field2()) != 0;
+}
+
+
 JSFunction* Map::unchecked_constructor() {
   return reinterpret_cast<JSFunction*>(READ_FIELD(this, kConstructorOffset));
 }
diff --git a/src/objects.cc b/src/objects.cc
index 3ab6ef5..737bf57 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -2099,61 +2099,34 @@ PropertyAttributes JSObject::GetLocalPropertyAttribute(String* name) {
 }
 
 
-bool NormalizedMapCache::IsCacheable(JSObject* object) {
-  // Caching for global objects is not worth it (there are too few of them).
-  return !object->IsGlobalObject();
-}
-
-
 Object* NormalizedMapCache::Get(JSObject* obj, PropertyNormalizationMode mode) {
-  Object* result;
-
   Map* fast = obj->map();
-  if (!IsCacheable(obj)) {
-    result = fast->CopyNormalized(mode);
-    if (result->IsFailure()) return result;
-  } else {
-    int index = Hash(fast) % kEntries;
-    result = get(index);
-
-    if (result->IsMap() && CheckHit(Map::cast(result), fast, mode)) {
+  int index = Hash(fast) % kEntries;
+  Object* result = get(index);
+  if (result->IsMap() && CheckHit(Map::cast(result), fast, mode)) {
 #ifdef DEBUG
-      if (FLAG_enable_slow_asserts) {
-        // Make sure that the new slow map has exactly the same hash as the
-        // original fast map. This way we can use hash to check if a slow map
-        // is already in the hash (see Contains method).
-        ASSERT(Hash(fast) == Hash(Map::cast(result)));
-        // The cached map should match newly created normalized map bit-by-bit.
-        Object* fresh = fast->CopyNormalized(mode);
-        if (!fresh->IsFailure()) {
-          ASSERT(memcmp(Map::cast(fresh)->address(),
-                        Map::cast(result)->address(),
-                        Map::kSize) == 0);
-        }
-      }
-#endif
-      return result;
-    }
-
-    result = fast->CopyNormalized(mode);
-    if (result->IsFailure()) return result;
-    set(index, result);
-  }
+    if (FLAG_enable_slow_asserts) {
+      // The cached map should match newly created normalized map bit-by-bit.
+      Object* fresh = fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
+      if (!fresh->IsFailure()) {
+        ASSERT(memcmp(Map::cast(fresh)->address(),
+                      Map::cast(result)->address(),
+                      Map::kSize) == 0);
+      }
+    }
+#endif
+    return result;
+  }
+
+  result = fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
+  if (result->IsFailure()) return result;
+  set(index, result);
   Counters::normalized_maps.Increment();
   return result;
 }
 
 
-bool NormalizedMapCache::Contains(Map* map) {
-  // If the map is present in the cache it can only be at one place:
-  // at the index calculated from the hash. We assume that a slow map has the
-  // same hash as a fast map it has been generated from.
-  int index = Hash(map) % kEntries;
-  return get(index) == map;
-}
-
-
 void NormalizedMapCache::Clear() {
   int entries = length();
   for (int i = 0; i != entries; i++) {
@@ -2184,7 +2157,7 @@ bool NormalizedMapCache::CheckHit(Map* slow,
                                   Map* fast,
                                   PropertyNormalizationMode mode) {
 #ifdef DEBUG
-  slow->NormalizedMapVerify();
+  slow->SharedMapVerify();
 #endif
   return slow->constructor() == fast->constructor() &&
@@ -2194,17 +2167,17 @@ bool NormalizedMapCache::CheckHit(Map* slow,
                                          fast->inobject_properties()) &&
          slow->instance_type() == fast->instance_type() &&
          slow->bit_field() == fast->bit_field() &&
-         slow->bit_field2() == fast->bit_field2();
+         (slow->bit_field2() & ~(1<<Map::kIsShared)) == fast->bit_field2();
 }
 
 
 Object* JSObject::UpdateMapCodeCache(String* name, Code* code) {
-  if (!HasFastProperties() &&
-      NormalizedMapCache::IsCacheable(this) &&
-      Top::context()->global_context()->normalized_map_cache()->
-          Contains(map())) {
-    // Replace the map with the identical copy that can be safely modified.
-    Object* obj = map()->CopyNormalized(KEEP_INOBJECT_PROPERTIES);
+  if (map()->is_shared()) {
+    // Fast case maps are never marked as shared.
+    ASSERT(!HasFastProperties());
+    // Replace the map with an identical copy that can be safely modified.
+    Object* obj = map()->CopyNormalized(KEEP_INOBJECT_PROPERTIES,
+                                        UNIQUE_NORMALIZED_MAP);
     if (obj->IsFailure()) return obj;
 
     Counters::normalized_maps.Increment();
@@ -3189,12 +3162,14 @@ Object* Map::CopyDropDescriptors() {
   }
   Map::cast(result)->set_bit_field(bit_field());
   Map::cast(result)->set_bit_field2(bit_field2());
+  Map::cast(result)->set_is_shared(false);
   Map::cast(result)->ClearCodeCache();
   return result;
 }
 
 
-Object* Map::CopyNormalized(PropertyNormalizationMode mode) {
+Object* Map::CopyNormalized(PropertyNormalizationMode mode,
+                            NormalizedMapSharingMode sharing) {
   int new_instance_size = instance_size();
   if (mode == CLEAR_INOBJECT_PROPERTIES) {
     new_instance_size -= inobject_properties() * kPointerSize;
@@ -3213,8 +3188,12 @@ Object* Map::CopyNormalized(PropertyNormalizationMode mode) {
   Map::cast(result)->set_bit_field(bit_field());
   Map::cast(result)->set_bit_field2(bit_field2());
+  Map::cast(result)->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
+
 #ifdef DEBUG
-  Map::cast(result)->NormalizedMapVerify();
+  if (Map::cast(result)->is_shared()) {
+    Map::cast(result)->SharedMapVerify();
+  }
 #endif
 
   return result;
diff --git a/src/objects.h b/src/objects.h
index 6c4af25..307f9cd 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -200,6 +200,14 @@ enum PropertyNormalizationMode {
 };
 
 
+// NormalizedMapSharingMode is used to specify whether a map may be shared
+// by different objects with normalized properties.
+enum NormalizedMapSharingMode {
+  UNIQUE_NORMALIZED_MAP,
+  SHARED_NORMALIZED_MAP
+};
+
+
 // Instance size sentinel for objects of variable size.
 static const int kVariableSizeSentinel = 0;
 
@@ -2509,12 +2517,8 @@ class NormalizedMapCache: public FixedArray {
 public:
   static const int kEntries = 64;
 
-  static bool IsCacheable(JSObject* object);
-
   Object* Get(JSObject* object, PropertyNormalizationMode mode);
 
-  bool Contains(Map* map);
-
   void Clear();
 
   // Casting
@@ -3176,6 +3180,13 @@ class Map: public HeapObject {
   inline bool attached_to_shared_function_info();
 
+  // Tells whether the map is shared between objects that may have different
+  // behavior. If true, the map should never be modified; instead, a clone
+  // should be created and modified.
+  inline void set_is_shared(bool value);
+
+  inline bool is_shared();
+
   // Tells whether the instance needs security checks when accessing its
   // properties.
   inline void set_is_access_check_needed(bool access_check_needed);
@@ -3197,7 +3208,8 @@ class Map: public HeapObject {
 
   MUST_USE_RESULT Object* CopyDropDescriptors();
 
-  MUST_USE_RESULT Object* CopyNormalized(PropertyNormalizationMode mode);
+  MUST_USE_RESULT Object* CopyNormalized(PropertyNormalizationMode mode,
+                                         NormalizedMapSharingMode sharing);
 
   // Returns a copy of the map, with all transitions dropped from the
   // instance descriptors.
@@ -3261,7 +3273,7 @@ class Map: public HeapObject {
 #ifdef DEBUG
   void MapPrint();
   void MapVerify();
-  void NormalizedMapVerify();
+  void SharedMapVerify();
 #endif
 
   inline int visitor_id();
@@ -3325,6 +3337,7 @@ class Map: public HeapObject {
   static const int kHasFastElements = 2;
   static const int kStringWrapperSafeForDefaultValueOf = 3;
   static const int kAttachedToSharedFunctionInfo = 4;
+  static const int kIsShared = 5;
 
   // Layout of the default cache. It holds alternating name and code objects.
   static const int kCodeCacheEntrySize = 2;
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 1963ff0..1b6359b 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -2937,6 +2937,49 @@ THREADED_TEST(NamedInterceptorDictionaryIC) {
 }
 
 
+THREADED_TEST(NamedInterceptorDictionaryICMultipleContext) {
+  v8::HandleScope scope;
+
+  v8::Persistent<Context> context1 = Context::New();
+
+  context1->Enter();
+  Local<ObjectTemplate> templ = ObjectTemplate::New();
+  templ->SetNamedPropertyHandler(XPropertyGetter);
+  // Create an object with a named interceptor.
+  v8::Local<v8::Object> object = templ->NewInstance();
+  context1->Global()->Set(v8_str("interceptor_obj"), object);
+
+  // Force the object into the slow case.
+  CompileRun("interceptor_obj.y = 0;"
+             "delete interceptor_obj.y;");
+  context1->Exit();
+
+  {
+    // Introduce the object into a different context.
+    // Repeat named loads to exercise ICs.
+    LocalContext context2;
+    context2->Global()->Set(v8_str("interceptor_obj"), object);
+    Local<Value> result =
+      CompileRun("function get_x(o) { return o.x; }"
+                 "interceptor_obj.x = 42;"
+                 "for (var i=0; i != 10; i++) {"
+                 "  get_x(interceptor_obj);"
+                 "}"
+                 "get_x(interceptor_obj)");
+    // Check that the interceptor was actually invoked.
+    CHECK_EQ(result, v8_str("x"));
+  }
+
+  // Return to the original context and force some object to the slow case
+  // to cause the NormalizedMapCache to verify.
+  context1->Enter();
+  CompileRun("var obj = { x : 0 }; delete obj.x;");
+  context1->Exit();
+
+  context1.Dispose();
+}
+
+
 static v8::Handle<Value> SetXOnPrototypeGetter(Local<String> property,
                                                const AccessorInfo& info) {
   // Set x on the prototype object and do not handle the get request.
-- 
2.7.4
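
Not part of the patch: the sketch below is a self-contained illustration of the single-bit "shared" flag pattern this change relies on. The FakeMap class, its byte-sized field and main() are assumptions made for the example; only the kIsShared bit index, the set/clear logic from objects-inl.h and the masking comparison used in NormalizedMapCache::CheckHit mirror the diff above.

#include <cassert>
#include <cstdint>

// Hypothetical stand-in for v8::internal::Map: one byte of flags, with
// kIsShared occupying bit 5 as in the patch.
class FakeMap {
 public:
  static const int kIsShared = 5;

  explicit FakeMap(uint8_t bit_field2) : bit_field2_(bit_field2) {}

  uint8_t bit_field2() const { return bit_field2_; }

  // Same set/clear pattern as Map::set_is_shared in objects-inl.h.
  void set_is_shared(bool value) {
    if (value) {
      bit_field2_ = bit_field2_ | (1 << kIsShared);
    } else {
      bit_field2_ = bit_field2_ & ~(1 << kIsShared);
    }
  }

  bool is_shared() const { return ((1 << kIsShared) & bit_field2_) != 0; }

 private:
  uint8_t bit_field2_;
};

int main() {
  FakeMap fast(0);             // fast-case maps are never marked as shared
  FakeMap cached = fast;       // copy that goes into the NormalizedMapCache
  cached.set_is_shared(true);  // flag is set once and survives cache eviction

  // CheckHit-style comparison: ignore the shared bit on the cached map.
  assert((cached.bit_field2() & ~(1 << FakeMap::kIsShared)) ==
         fast.bit_field2());

  // UpdateMapCodeCache-style guard: never modify a shared map in place;
  // work on a private copy with the flag cleared instead.
  if (cached.is_shared()) {
    FakeMap unique = cached;
    unique.set_is_shared(false);
    assert(!unique.is_shared());
  }
  return 0;
}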