Workaround 'label handle_thr_start is not used' cppcheck style warning
[platform/upstream/libgc.git] / finalize.c
index 4b513fc..c8708b9 100644
@@ -89,22 +89,28 @@ STATIC struct fnlz_roots_s {
 
 GC_API void GC_CALL GC_push_finalizer_structures(void)
 {
-  GC_ASSERT((word)&GC_dl_hashtbl.head % sizeof(word) == 0);
-  GC_ASSERT((word)&GC_fnlz_roots % sizeof(word) == 0);
+  GC_ASSERT((word)(&GC_dl_hashtbl.head) % sizeof(word) == 0);
+  GC_ASSERT((word)(&GC_fnlz_roots) % sizeof(word) == 0);
 # ifndef GC_LONG_REFS_NOT_NEEDED
-    GC_ASSERT((word)&GC_ll_hashtbl.head % sizeof(word) == 0);
+    GC_ASSERT((word)(&GC_ll_hashtbl.head) % sizeof(word) == 0);
     GC_PUSH_ALL_SYM(GC_ll_hashtbl.head);
 # endif
   GC_PUSH_ALL_SYM(GC_dl_hashtbl.head);
   GC_PUSH_ALL_SYM(GC_fnlz_roots);
 }
 
-/* Double the size of a hash table. *size_ptr is the log of its current */
-/* size.  May be a no-op.                                               */
+/* Threshold of log_size to initiate a full collection before growing   */
+/* a hash table.                                                        */
+#ifndef GC_ON_GROW_LOG_SIZE_MIN
+# define GC_ON_GROW_LOG_SIZE_MIN CPP_LOG_HBLKSIZE
+#endif
+
+/* Double the size of a hash table. *log_size_ptr is the log of its     */
+/* current size.  May be a no-op.                                       */
 /* *table is a pointer to an array of hash headers.  If we succeed, we  */
 /* update both *table and *log_size_ptr.  Lock is held.                 */
 STATIC void GC_grow_table(struct hash_chain_entry ***table,
-                          signed_word *log_size_ptr)
+                          signed_word *log_size_ptr, word *entries_ptr)
 {
     word i;
     struct hash_chain_entry *p;
@@ -116,6 +122,21 @@ STATIC void GC_grow_table(struct hash_chain_entry ***table,
     struct hash_chain_entry **new_table;
 
     GC_ASSERT(I_HOLD_LOCK());
+    /* Avoid growing the table if at least 25% of its entries could be  */
+    /* deleted by enforcing a collection.  Ignored for small tables.    */
+    /* In incremental mode we skip this optimization, as we want to     */
+    /* avoid triggering a full GC whenever possible.                    */
+    if (log_old_size >= GC_ON_GROW_LOG_SIZE_MIN && !GC_incremental) {
+      IF_CANCEL(int cancel_state;)
+
+      DISABLE_CANCEL(cancel_state);
+      (void)GC_try_to_collect_inner(GC_never_stop_func);
+      RESTORE_CANCEL(cancel_state);
+      /* GC_finalize might decrease the entries count.  */
+      if (*entries_ptr < ((word)1 << log_old_size) - (*entries_ptr >> 2))
+        return;
+    }
+
     new_table = (struct hash_chain_entry **)
                     GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
                         (size_t)new_size * sizeof(struct hash_chain_entry *),
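
Note on the check above: after the forced collection the table is grown only if it is still more than roughly 80% full; growth is skipped whenever *entries_ptr plus a quarter of it still fits under the current 2^log_old_size bucket count, i.e. the collection freed enough entries.  GC_ON_GROW_LOG_SIZE_MIN defaults to CPP_LOG_HBLKSIZE and, thanks to the #ifndef guard, can be overridden at build time (e.g. -D GC_ON_GROW_LOG_SIZE_MIN=12, an illustrative value).  A minimal standalone sketch of the arithmetic, using a hypothetical skip_grow() helper that is not part of the patch:

    #include <stdio.h>

    typedef unsigned long word;

    /* Mirrors: *entries_ptr < ((word)1 << log_old_size) - (*entries_ptr >> 2) */
    static int skip_grow(word entries, unsigned log_old_size)
    {
        return entries < (((word)1 << log_old_size) - (entries >> 2));
    }

    int main(void)
    {
        /* A 1024-bucket table (log_old_size = 10): */
        printf("%d\n", skip_grow(800, 10)); /* 800 + 200 < 1024 -> 1 (skip growing) */
        printf("%d\n", skip_grow(900, 10)); /* 900 + 225 > 1024 -> 0 (grow)         */
        return 0;
    }
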
@@ -135,12 +156,14 @@ STATIC void GC_grow_table(struct hash_chain_entry ***table,
         size_t new_hash = HASH3(real_key, new_size, log_new_size);
 
         p -> next = new_table[new_hash];
+        GC_dirty(p);
         new_table[new_hash] = p;
         p = next;
       }
     }
     *log_size_ptr = log_new_size;
     *table = new_table;
+    GC_dirty(new_table); /* entire object */
 }
 
 GC_API int GC_CALL GC_register_disappearing_link(void * * link)
@@ -168,7 +191,7 @@ STATIC int GC_register_disappearing_link_inner(
     if (dl_hashtbl -> log_size == -1
         || dl_hashtbl -> entries > ((word)1 << dl_hashtbl -> log_size)) {
         GC_grow_table((struct hash_chain_entry ***)&dl_hashtbl -> head,
-                      &dl_hashtbl -> log_size);
+                      &dl_hashtbl -> log_size, &dl_hashtbl -> entries);
 #       ifdef LINT2
           if (dl_hashtbl->log_size < 0) ABORT("log_size is negative");
 #       endif
@@ -215,8 +238,10 @@ STATIC int GC_register_disappearing_link_inner(
     new_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
     new_dl -> dl_hidden_link = GC_HIDE_POINTER(link);
     dl_set_next(new_dl, dl_hashtbl -> head[index]);
+    GC_dirty(new_dl);
     dl_hashtbl -> head[index] = new_dl;
     dl_hashtbl -> entries++;
+    GC_dirty(dl_hashtbl->head + index);
     UNLOCK();
     return GC_SUCCESS;
 }
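
The GC_dirty() calls added throughout this file act as write barriers for the collector's incremental / manual virtual-dirty-bits mode: after a pointer is stored into a heap object (a new link entry, a hash bucket slot, or the whole bucket array), the page containing the written location is recorded as dirty so that incremental marking rescans it.  A self-contained sketch of the store-then-dirty pattern; bucket_prepend() and gc_dirty_stub() are illustrative names, the stub standing in for the collector-internal GC_dirty() from the private headers:

    #include <stddef.h>

    typedef struct dl { struct dl *next; } dl_t;

    /* Placeholder for the real write barrier, which records the page   */
    /* holding the written address for re-scanning.                     */
    static void gc_dirty_stub(const void *p) { (void)p; }

    static void bucket_prepend(dl_t **bucket, dl_t *entry)
    {
        entry->next = *bucket;    /* pointer store into the new entry...   */
        gc_dirty_stub(entry);     /* ...so its page must be re-scanned     */
        *bucket = entry;          /* pointer store into the bucket slot... */
        gc_dirty_stub(bucket);    /* ...and so must the page of the slot   */
    }

    int main(void)
    {
        dl_t *head = NULL;
        dl_t node = { NULL };
        bucket_prepend(&head, &node);   /* head now points at node */
        return 0;
    }
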
@@ -237,7 +262,6 @@ GC_API int GC_CALL GC_general_register_disappearing_link(void * * link,
 #endif
 
 /* Unregisters given link and returns the link entry to free.   */
-/* Assume the lock is held.                                     */
 GC_INLINE struct disappearing_link *GC_unregister_disappearing_link_inner(
                                 struct dl_hashtbl_s *dl_hashtbl, void **link)
 {
@@ -245,6 +269,7 @@ GC_INLINE struct disappearing_link *GC_unregister_disappearing_link_inner(
     struct disappearing_link *prev_dl = NULL;
     size_t index;
 
+    GC_ASSERT(I_HOLD_LOCK());
     if (dl_hashtbl->log_size == -1)
         return NULL; /* prevent integer shift by a negative amount */
 
@@ -255,8 +280,10 @@ GC_INLINE struct disappearing_link *GC_unregister_disappearing_link_inner(
             /* Remove found entry from the table. */
             if (NULL == prev_dl) {
                 dl_hashtbl -> head[index] = dl_next(curr_dl);
+                GC_dirty(dl_hashtbl->head + index);
             } else {
                 dl_set_next(prev_dl, dl_next(curr_dl));
+                GC_dirty(prev_dl);
             }
             dl_hashtbl -> entries--;
             break;
@@ -298,6 +325,7 @@ GC_API int GC_CALL GC_unregister_disappearing_link(void * * link)
   {
     int i;
     int new_size = 0;
+    GC_bool needs_barrier = FALSE;
 
     GC_ASSERT(I_HOLD_LOCK());
     for (i = 0; i < GC_toggleref_array_size; ++i) {
@@ -315,6 +343,7 @@ GC_API int GC_CALL GC_unregister_disappearing_link(void * * link)
         break;
       case GC_TOGGLE_REF_STRONG:
         GC_toggleref_arr[new_size++].strong_ref = obj;
+        needs_barrier = TRUE;
         break;
       case GC_TOGGLE_REF_WEAK:
         GC_toggleref_arr[new_size++].weak_ref = GC_HIDE_POINTER(obj);
@@ -329,6 +358,8 @@ GC_API int GC_CALL GC_unregister_disappearing_link(void * * link)
             (GC_toggleref_array_size - new_size) * sizeof(GCToggleRef));
       GC_toggleref_array_size = new_size;
     }
+    if (needs_barrier)
+      GC_dirty(GC_toggleref_arr); /* entire object */
   }
 
   STATIC void GC_normal_finalize_mark_proc(ptr_t);
@@ -444,8 +475,11 @@ GC_API int GC_CALL GC_unregister_disappearing_link(void * * link)
       if (!ensure_toggleref_capacity(1)) {
         res = GC_NO_MEMORY;
       } else {
-        GC_toggleref_arr[GC_toggleref_array_size++].strong_ref =
+        GC_toggleref_arr[GC_toggleref_array_size].strong_ref =
                         is_strong_ref ? obj : (void *)GC_HIDE_POINTER(obj);
+        if (is_strong_ref)
+          GC_dirty(GC_toggleref_arr + GC_toggleref_array_size);
+        GC_toggleref_array_size++;
       }
     }
     UNLOCK();
@@ -512,6 +546,7 @@ GC_API GC_await_finalize_proc GC_CALL GC_get_await_finalize_proc(void)
     word curr_hidden_link;
     word new_hidden_link;
 
+    GC_ASSERT(I_HOLD_LOCK());
     if (dl_hashtbl->log_size == -1)
         return GC_NOT_FOUND; /* prevent integer shift by a negative amount */
 
@@ -550,10 +585,13 @@ GC_API GC_await_finalize_proc GC_CALL GC_get_await_finalize_proc(void)
       dl_hashtbl -> head[curr_index] = dl_next(curr_dl);
     } else {
       dl_set_next(prev_dl, dl_next(curr_dl));
+      GC_dirty(prev_dl);
     }
     curr_dl -> dl_hidden_link = new_hidden_link;
     dl_set_next(curr_dl, dl_hashtbl -> head[new_index]);
     dl_hashtbl -> head[new_index] = curr_dl;
+    GC_dirty(curr_dl);
+    GC_dirty(dl_hashtbl->head); /* entire object */
     return GC_SUCCESS;
   }
 
@@ -598,10 +636,27 @@ GC_API GC_await_finalize_proc GC_CALL GC_get_await_finalize_proc(void)
 /* overflow is handled by the caller, and is not a disaster.            */
 STATIC void GC_normal_finalize_mark_proc(ptr_t p)
 {
+# if defined(_MSC_VER) && defined(I386)
     hdr * hhdr = HDR(p);
+    /* This is a manually inlined variant of GC_push_obj().  Otherwise  */
+    /* some optimizer bug is tickled in VC for X86 (v19, at least).     */
+#   define mark_stack_top GC_mark_stack_top
+    mse * mark_stack_limit = GC_mark_stack + GC_mark_stack_size;
+    word descr = hhdr -> hb_descr;
 
-    PUSH_OBJ(p, hhdr, GC_mark_stack_top,
-             &(GC_mark_stack[GC_mark_stack_size]));
+    if (descr != 0) {
+      mark_stack_top++;
+      if ((word)mark_stack_top >= (word)mark_stack_limit) {
+        mark_stack_top = GC_signal_mark_stack_overflow(mark_stack_top);
+      }
+      mark_stack_top -> mse_start = p;
+      mark_stack_top -> mse_descr.w = descr;
+    }
+#   undef mark_stack_top
+# else
+    GC_mark_stack_top = GC_push_obj(p, HDR(p), GC_mark_stack_top,
+                                    GC_mark_stack + GC_mark_stack_size);
+# endif
 }
 
 /* This only pays very partial attention to the mark descriptor.        */
@@ -665,7 +720,7 @@ STATIC void GC_register_finalizer_inner(void * obj,
     if (log_fo_table_size == -1
         || GC_fo_entries > ((word)1 << log_fo_table_size)) {
         GC_grow_table((struct hash_chain_entry ***)&GC_fnlz_roots.fo_head,
-                      &log_fo_table_size);
+                      &log_fo_table_size, &GC_fo_entries);
 #       ifdef LINT2
           if (log_fo_table_size < 0) ABORT("log_size is negative");
 #       endif
@@ -692,6 +747,7 @@ STATIC void GC_register_finalizer_inner(void * obj,
             GC_fnlz_roots.fo_head[index] = fo_next(curr_fo);
           } else {
             fo_set_next(prev_fo, fo_next(curr_fo));
+            GC_dirty(prev_fo);
           }
           if (fn == 0) {
             GC_fo_entries--;
@@ -705,20 +761,22 @@ STATIC void GC_register_finalizer_inner(void * obj,
             curr_fo -> fo_fn = fn;
             curr_fo -> fo_client_data = (ptr_t)cd;
             curr_fo -> fo_mark_proc = mp;
+            GC_dirty(curr_fo);
             /* Reinsert it.  We deleted it first to maintain    */
             /* consistency in the event of a signal.            */
             if (prev_fo == 0) {
               GC_fnlz_roots.fo_head[index] = curr_fo;
             } else {
               fo_set_next(prev_fo, curr_fo);
+              GC_dirty(prev_fo);
             }
           }
+          if (NULL == prev_fo)
+            GC_dirty(GC_fnlz_roots.fo_head + index);
           UNLOCK();
 #         ifndef DBG_HDRS_ALL
-            if (EXPECT(new_fo != 0, FALSE)) {
               /* Free unused new_fo returned by GC_oom_fn() */
               GC_free((void *)new_fo);
-            }
 #         endif
           return;
         }
@@ -773,8 +831,10 @@ STATIC void GC_register_finalizer_inner(void * obj,
     new_fo -> fo_object_size = hhdr -> hb_sz;
     new_fo -> fo_mark_proc = mp;
     fo_set_next(new_fo, GC_fnlz_roots.fo_head[index]);
+    GC_dirty(new_fo);
     GC_fo_entries++;
     GC_fnlz_roots.fo_head[index] = new_fo;
+    GC_dirty(GC_fnlz_roots.fo_head + index);
     UNLOCK();
 }
 
@@ -897,68 +957,53 @@ GC_API void GC_CALL GC_register_finalizer_unreachable(void * obj,
   }
 #endif /* THREADS */
 
-#define ITERATE_DL_HASHTBL_BEGIN(dl_hashtbl, curr_dl, prev_dl) \
-  { \
-    size_t i; \
-    size_t dl_size = dl_hashtbl->log_size == -1 ? 0 : \
-                                (size_t)1 << dl_hashtbl->log_size; \
-    for (i = 0; i < dl_size; i++) { \
-      struct disappearing_link *prev_dl = NULL; \
-      curr_dl = dl_hashtbl -> head[i]; \
-      while (curr_dl) {
-
-#define ITERATE_DL_HASHTBL_END(curr_dl, prev_dl) \
-        prev_dl = curr_dl; \
-        curr_dl = dl_next(curr_dl); \
-      } \
-    } \
-  }
-
-#define DELETE_DL_HASHTBL_ENTRY(dl_hashtbl, curr_dl, prev_dl, next_dl) \
-  { \
-    next_dl = dl_next(curr_dl); \
-    if (NULL == prev_dl) { \
-        dl_hashtbl -> head[i] = next_dl; \
-    } else { \
-        dl_set_next(prev_dl, next_dl); \
-    } \
-    GC_clear_mark_bit(curr_dl); \
-    dl_hashtbl -> entries--; \
-    curr_dl = next_dl; \
-    continue; \
-  }
-
 GC_INLINE void GC_make_disappearing_links_disappear(
-                                struct dl_hashtbl_s* dl_hashtbl)
+                                        struct dl_hashtbl_s* dl_hashtbl,
+                                        GC_bool is_remove_dangling)
 {
-    struct disappearing_link *curr, *next;
+  size_t i;
+  size_t dl_size = dl_hashtbl->log_size == -1 ? 0
+                        : (size_t)1 << dl_hashtbl->log_size;
+  GC_bool needs_barrier = FALSE;
+
+  GC_ASSERT(I_HOLD_LOCK());
+  for (i = 0; i < dl_size; i++) {
+    struct disappearing_link *curr_dl, *next_dl;
+    struct disappearing_link *prev_dl = NULL;
 
-    ITERATE_DL_HASHTBL_BEGIN(dl_hashtbl, curr, prev)
-        ptr_t real_ptr = (ptr_t)GC_REVEAL_POINTER(curr->dl_hidden_obj);
-        ptr_t real_link = (ptr_t)GC_REVEAL_POINTER(curr->dl_hidden_link);
+    for (curr_dl = dl_hashtbl->head[i]; curr_dl != NULL; curr_dl = next_dl) {
+      next_dl = dl_next(curr_dl);
+      if (is_remove_dangling) {
+        ptr_t real_link = (ptr_t)GC_base(GC_REVEAL_POINTER(
+                                                curr_dl->dl_hidden_link));
 
-        if (!GC_is_marked(real_ptr)) {
-            *(word *)real_link = 0;
-            GC_clear_mark_bit(curr);
-            DELETE_DL_HASHTBL_ENTRY(dl_hashtbl, curr, prev, next);
+        if (NULL == real_link || EXPECT(GC_is_marked(real_link), TRUE)) {
+          prev_dl = curr_dl;
+          continue;
         }
-    ITERATE_DL_HASHTBL_END(curr, prev)
-}
-
-GC_INLINE void GC_remove_dangling_disappearing_links(
-                                struct dl_hashtbl_s* dl_hashtbl)
-{
-    struct disappearing_link *curr, *next;
-
-    ITERATE_DL_HASHTBL_BEGIN(dl_hashtbl, curr, prev)
-        ptr_t real_link =
-                (ptr_t)GC_base(GC_REVEAL_POINTER(curr->dl_hidden_link));
-
-        if (NULL != real_link && !GC_is_marked(real_link)) {
-            GC_clear_mark_bit(curr);
-            DELETE_DL_HASHTBL_ENTRY(dl_hashtbl, curr, prev, next);
+      } else {
+        if (EXPECT(GC_is_marked((ptr_t)GC_REVEAL_POINTER(
+                                        curr_dl->dl_hidden_obj)), TRUE)) {
+          prev_dl = curr_dl;
+          continue;
         }
-    ITERATE_DL_HASHTBL_END(curr, prev)
+        *(ptr_t *)GC_REVEAL_POINTER(curr_dl->dl_hidden_link) = NULL;
+      }
+
+      /* Delete curr_dl entry from dl_hashtbl.  */
+      if (NULL == prev_dl) {
+        dl_hashtbl -> head[i] = next_dl;
+        needs_barrier = TRUE;
+      } else {
+        dl_set_next(prev_dl, next_dl);
+        GC_dirty(prev_dl);
+      }
+      GC_clear_mark_bit(curr_dl);
+      dl_hashtbl -> entries--;
+    }
+  }
+  if (needs_barrier)
+    GC_dirty(dl_hashtbl -> head); /* entire object */
 }
 
 /* Called with held lock (but the world is running).                    */
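
The two former helpers, GC_make_disappearing_links_disappear() and GC_remove_dangling_disappearing_links() (together with the ITERATE_DL_HASHTBL_* and DELETE_DL_HASHTBL_ENTRY macros), are folded into a single routine selected by the is_remove_dangling flag: the FALSE pass clears *link and drops the entry when the registered object is unmarked, while the TRUE pass drops entries whose link location itself no longer lies inside a live heap object.  A compact standalone sketch of just that per-entry decision; should_delete_entry() and its callback parameters are hypothetical stand-ins for GC_is_marked()/GC_base(), and the unlinking, mark-bit clearing and GC_dirty bookkeeping are omitted:

    #include <stddef.h>

    typedef int GC_bool;
    #define FALSE 0
    #define TRUE 1

    /* Decide whether a disappearing-link entry should be removed.         */
    /* obj  - the registered object (already revealed);                    */
    /* link - the registered link location (already revealed).             */
    static GC_bool should_delete_entry(GC_bool is_remove_dangling,
                                       void *obj, void **link,
                                       GC_bool (*is_marked)(void *),
                                       void *(*gc_base)(void *))
    {
        if (is_remove_dangling) {
            /* Second pass (after finalization): drop the entry if the     */
            /* link word itself is no longer in a live heap object.        */
            void *link_base = gc_base((void *)link);
            return link_base != NULL && !is_marked(link_base);
        }
        /* First pass: the target object died; the caller also stores      */
        /* NULL through the link before unlinking the entry.               */
        return !is_marked(obj);
    }

    /* Trivial driver with placeholder callbacks, just to show the call shape. */
    static GC_bool always_marked(void *p) { (void)p; return TRUE; }
    static void *identity_base(void *p) { return p; }

    int main(void)
    {
        int dummy_obj = 0;
        void *link_slot = &dummy_obj;
        return should_delete_entry(FALSE, &dummy_obj, &link_slot,
                                   always_marked, identity_base);
    }
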
@@ -971,7 +1016,9 @@ GC_INNER void GC_finalize(void)
     size_t i;
     size_t fo_size = log_fo_table_size == -1 ? 0 :
                                 (size_t)1 << log_fo_table_size;
+    GC_bool needs_barrier = FALSE;
 
+    GC_ASSERT(I_HOLD_LOCK());
 #   ifndef SMALL_CONFIG
       /* Save current GC_[dl/ll]_entries value for stats printing */
       GC_old_dl_entries = GC_dl_hashtbl.entries;
@@ -983,7 +1030,7 @@ GC_INNER void GC_finalize(void)
 #   ifndef GC_TOGGLE_REFS_NOT_NEEDED
       GC_mark_togglerefs();
 #   endif
-    GC_make_disappearing_links_disappear(&GC_dl_hashtbl);
+    GC_make_disappearing_links_disappear(&GC_dl_hashtbl, FALSE);
 
   /* Mark all objects reachable via chains of 1 or more pointers        */
   /* from finalizable objects.                                          */
@@ -1018,8 +1065,14 @@ GC_INNER void GC_finalize(void)
               next_fo = fo_next(curr_fo);
               if (NULL == prev_fo) {
                 GC_fnlz_roots.fo_head[i] = next_fo;
+                if (GC_object_finalized_proc) {
+                  GC_dirty(GC_fnlz_roots.fo_head + i);
+                } else {
+                  needs_barrier = TRUE;
+                }
               } else {
                 fo_set_next(prev_fo, next_fo);
+                GC_dirty(prev_fo);
               }
               GC_fo_entries--;
               if (GC_object_finalized_proc)
@@ -1027,6 +1080,7 @@ GC_INNER void GC_finalize(void)
 
             /* Add to list of objects awaiting finalization.    */
               fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
+              GC_dirty(curr_fo);
               SET_FINALIZE_NOW(curr_fo);
               /* unhide object pointer so any future collections will   */
               /* see it.                                                */
@@ -1080,6 +1134,7 @@ GC_INNER void GC_finalize(void)
                 SET_FINALIZE_NOW(next_fo);
               } else {
                 fo_set_next(prev_fo, next_fo);
+                GC_dirty(prev_fo);
               }
               curr_fo -> fo_hidden_base =
                                 GC_HIDE_POINTER(curr_fo -> fo_hidden_base);
@@ -1088,9 +1143,11 @@ GC_INNER void GC_finalize(void)
 
               i = HASH2(real_ptr, log_fo_table_size);
               fo_set_next(curr_fo, GC_fnlz_roots.fo_head[i]);
+              GC_dirty(curr_fo);
               GC_fo_entries++;
               GC_fnlz_roots.fo_head[i] = curr_fo;
               curr_fo = prev_fo;
+              needs_barrier = TRUE;
             }
           }
           prev_fo = curr_fo;
@@ -1098,14 +1155,18 @@ GC_INNER void GC_finalize(void)
         }
       }
   }
+  if (needs_barrier)
+    GC_dirty(GC_fnlz_roots.fo_head); /* entire object */
+
+  /* Remove dangling disappearing links. */
+  GC_make_disappearing_links_disappear(&GC_dl_hashtbl, TRUE);
 
-  GC_remove_dangling_disappearing_links(&GC_dl_hashtbl);
 # ifndef GC_TOGGLE_REFS_NOT_NEEDED
     GC_clear_togglerefs();
 # endif
 # ifndef GC_LONG_REFS_NOT_NEEDED
-    GC_make_disappearing_links_disappear(&GC_ll_hashtbl);
-    GC_remove_dangling_disappearing_links(&GC_ll_hashtbl);
+    GC_make_disappearing_links_disappear(&GC_ll_hashtbl, FALSE);
+    GC_make_disappearing_links_disappear(&GC_ll_hashtbl, TRUE);
 # endif
 
   if (GC_fail_count) {
@@ -1121,13 +1182,14 @@ GC_INNER void GC_finalize(void)
 
 #ifndef JAVA_FINALIZATION_NOT_NEEDED
 
-  /* Enqueue all remaining finalizers to be run - Assumes lock is held. */
+  /* Enqueue all remaining finalizers to be run.        */
   STATIC void GC_enqueue_all_finalizers(void)
   {
     struct finalizable_object * next_fo;
     int i;
     int fo_size;
 
+    GC_ASSERT(I_HOLD_LOCK());
     fo_size = log_fo_table_size == -1 ? 0 : 1 << log_fo_table_size;
     GC_bytes_finalized = 0;
     for (i = 0; i < fo_size; i++) {
@@ -1144,6 +1206,7 @@ GC_INNER void GC_finalize(void)
 
           /* Add to list of objects awaiting finalization.      */
           fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
+          GC_dirty(curr_fo);
           SET_FINALIZE_NOW(curr_fo);
 
           /* unhide object pointer so any future collections will       */
@@ -1153,7 +1216,7 @@ GC_INNER void GC_finalize(void)
           GC_bytes_finalized +=
                 curr_fo -> fo_object_size + sizeof(struct finalizable_object);
           curr_fo = next_fo;
-        }
+      }
     }
     GC_fo_entries = 0;  /* all entries deleted from the hash table */
   }
@@ -1282,7 +1345,7 @@ GC_INNER void GC_notify_or_invoke_finalizers(void)
 #       ifdef KEEP_BACK_PTRS
           long i;
           /* Stops when GC_gc_no wraps; that's OK.      */
-          last_back_trace_gc_no = (word)(-1);  /* disable others. */
+          last_back_trace_gc_no = GC_WORD_MAX;  /* disable others. */
           for (i = 0; i < GC_backtraces; ++i) {
               /* FIXME: This tolerates concurrent heap mutation,        */
               /* which may cause occasional mysterious results.         */
@@ -1296,9 +1359,7 @@ GC_INNER void GC_notify_or_invoke_finalizers(void)
 #       endif
 #       ifdef MAKE_BACK_GRAPH
           if (GC_print_back_height) {
-            UNLOCK();
             GC_print_back_graph_stats();
-            LOCK();
           }
 #       endif
       }
@@ -1324,8 +1385,8 @@ GC_INNER void GC_notify_or_invoke_finalizers(void)
 
     /* These variables require synchronization to avoid data races.     */
     if (last_finalizer_notification != GC_gc_no) {
-        last_finalizer_notification = GC_gc_no;
         notifier_fn = GC_finalizer_notifier;
+        last_finalizer_notification = GC_gc_no;
     }
     UNLOCK();
     if (notifier_fn != 0)