 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 2005 Hewlett-Packard Development Company, L.P.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
/* Note that for these routines, it is the client's responsibility to   */
/* add the extra byte at the end to deal with one-past-the-end pointers.*/
/* In the standard collector configuration, the collector assumes that  */
/* such a byte has been added, and hence does not trace the last word   */
/* in the resulting object.                                             */
/* This is not an issue if the collector is compiled with               */
/* DONT_ADD_BYTE_AT_END, or if GC_all_interior_pointers is not set.     */
/* This interface is most useful for compilers that generate C.         */
/* It is also used internally for thread-local allocation.              */
/* Manual use is hereby discouraged.                                    */
#include "gc_tiny_fl.h"
34 #if GC_GNUC_PREREQ(3, 0)
35 # define GC_EXPECT(expr, outcome) __builtin_expect(expr,outcome)
36 /* Equivalent to (expr), but predict that usually (expr)==outcome. */
38 # define GC_EXPECT(expr, outcome) (expr)
/* GC_ASSERT: internal consistency check.  Compiles to nothing when    */
/* NDEBUG is defined; otherwise it is a plain assert().                */
/* NOTE(review): the surrounding "#ifdef NDEBUG"/"#else"/"#endif"      */
/* lines were missing from this chunk; restored — confirm vs upstream. */
#ifdef NDEBUG
# define GC_ASSERT(expr) /* empty */
#else
# include <assert.h>
# define GC_ASSERT(expr) assert(expr)
#endif
54 #ifndef GC_PREFETCH_FOR_WRITE
55 # if GC_GNUC_PREREQ(3, 0) && !defined(GC_NO_PREFETCH_FOR_WRITE)
56 # define GC_PREFETCH_FOR_WRITE(x) __builtin_prefetch((x), 1)
58 # define GC_PREFETCH_FOR_WRITE(x) (void)0
/* Object kinds; must match PTRFREE, NORMAL in gc_priv.h.  GC_I_NORMAL */
/* is required by GC_MALLOC_WORDS/GC_CONS below; its define was        */
/* missing from this chunk and is restored here.                       */
#define GC_I_PTRFREE 0
#define GC_I_NORMAL 1
66 /* Store a pointer to a list of newly allocated objects of kind k and */
67 /* size lb in *result. The caller must make sure that *result is */
68 /* traced even if objects are ptrfree. */
69 GC_API void GC_CALL GC_generic_malloc_many(size_t /* lb */, int /* k */,
70 void ** /* result */);
72 /* Generalized version of GC_malloc and GC_malloc_atomic. */
73 /* Uses appropriately the thread-local (if available) or the global */
74 /* free-list of the specified kind. */
75 GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
76 GC_malloc_kind(size_t /* lb */, int /* k */);
/* NOTE(review): the "#ifdef GC_THREADS"/"#else"/"#endif" lines were   */
/* missing from this chunk (a prototype immediately followed by a      */
/* conflicting define); restored here — confirm vs upstream.           */
#ifdef GC_THREADS
  /* Same as above but uses only the global free-list. */
  GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
        GC_malloc_kind_global(size_t /* lb */, int /* k */);
#else
# define GC_malloc_kind_global GC_malloc_kind
#endif
/* An internal macro to update the free list pointer atomically (if    */
/* the AO primitives are available) to avoid race with the marker.     */
/* Without GC_THREADS + AO_HAVE_store it degrades to a plain store.    */
/* NOTE(review): the "#else"/"#endif" lines were missing from this     */
/* chunk; restored here — confirm vs upstream.                         */
#if defined(GC_THREADS) && defined(AO_HAVE_store)
# define GC_FAST_M_AO_STORE(my_fl, next) \
                AO_store((volatile AO_t *)(my_fl), (AO_t)(next))
#else
# define GC_FAST_M_AO_STORE(my_fl, next) (void)(*(my_fl) = (next))
#endif
/* The ultimately general inline allocation macro.  Allocate an object */
/* of size granules, putting the resulting pointer in result.  Tiny_fl */
/* is a "tiny" free list array, which will be used first, if the size  */
/* is appropriate.  If granules is too large, we allocate with         */
/* default_expr instead.  If we need to refill the free list, we use   */
/* GC_generic_malloc_many with the indicated kind.                     */
/* Tiny_fl should be an array of GC_TINY_FREELISTS void * pointers.    */
/* If num_direct is nonzero, and the individual free list pointers     */
/* are initialized to (void *)1, then we allocate num_direct granules  */
/* directly using generic_malloc before putting multiple objects into  */
/* the tiny_fl entry.  If num_direct is zero, then the free lists may  */
/* also be initialized to (void *)0.                                   */
/* Note that we use the zeroth free list to hold objects 1 granule in  */
/* size that are used to satisfy size 0 allocation requests.           */
/* We rely on much of this hopefully getting optimized away in the     */
/* num_direct = 0 case.                                                */
/* Particularly if granules is constant, this should generate a small  */
/* amount of code.                                                     */
/* See the block comment above for the full contract of this macro.    */
/* NOTE(review): numerous continuation lines (the do/while wrapper,    */
/* the retry loop header, "init;", the GC_generic_malloc_many tail     */
/* arguments, and closing braces) were missing from this chunk; they   */
/* are reconstructed below — diff against upstream gc_inline.h before  */
/* relying on this.                                                    */
# define GC_FAST_MALLOC_GRANS(result,granules,tiny_fl,num_direct, \
                              kind,default_expr,init) \
  do { \
    if (GC_EXPECT((granules) >= GC_TINY_FREELISTS,0)) { \
        result = (default_expr); \
    } else { \
        void **my_fl = (tiny_fl) + (granules); \
        void *my_entry=*my_fl; \
        void *next; \
 \
        for (;;) { \
            if (GC_EXPECT((GC_word)my_entry \
                          > (num_direct) + GC_TINY_FREELISTS + 1, 1)) { \
                /* Fast path: pop the head object off the free list. */ \
                next = *(void **)(my_entry); \
                result = (void *)my_entry; \
                GC_FAST_M_AO_STORE(my_fl, next); \
                init; \
                GC_PREFETCH_FOR_WRITE(next); \
                if ((kind) != GC_I_PTRFREE) { \
                    GC_end_stubborn_change(my_fl); \
                    GC_reachable_here(next); \
                } \
                GC_ASSERT(GC_size(result) >= (granules)*GC_GRANULE_BYTES); \
                GC_ASSERT((kind) == GC_I_PTRFREE \
                          || ((GC_word *)result)[1] == 0); \
                break; \
            } \
            /* Entry contains counter or NULL */ \
            if ((GC_signed_word)my_entry - (GC_signed_word)(num_direct) <= 0 \
                    /* (GC_word)my_entry <= (num_direct) */ \
                    && my_entry != NULL) { \
                /* Small counter value, not NULL */ \
                GC_FAST_M_AO_STORE(my_fl, (char *)my_entry \
                                          + (granules) + 1); \
                result = (default_expr); \
                break; \
            } else { \
                /* Large counter or NULL */ \
                GC_generic_malloc_many(((granules) == 0? GC_GRANULE_BYTES : \
                                        GC_RAW_BYTES_FROM_INDEX(granules)), \
                                       kind, my_fl); \
                my_entry = *my_fl; \
                if (my_entry == 0) { \
                    result = (*GC_get_oom_fn())((granules)*GC_GRANULE_BYTES); \
                    break; \
                } \
            } \
        } \
    } \
  } while (0)
/* Convert a count of n words to the number of granules needed to hold */
/* them, rounding up to a whole granule.                               */
# define GC_WORDS_TO_WHOLE_GRANULES(n) \
        GC_WORDS_TO_GRANULES((n) + GC_GRANULE_WORDS - 1)
/* Allocate n words (NOT BYTES); "result" is made to point to the      */
/* allocation.                                                         */
/* This should really only be used if GC_all_interior_pointers is      */
/* not set, or DONT_ADD_BYTE_AT_END is set.  See above.                */
/* The semantics changed in version 7.0; we no longer lock, and        */
/* the caller is responsible for supplying a cleared tiny_fl           */
/* free list array.  For single-threaded applications, this may be     */
/* a global array.                                                     */
/* Allocate n words of the given kind from tiny_fl, falling back to    */
/* GC_malloc_kind; "init" runs on the fast path right after the store. */
/* NOTE(review): the "do {", the trailing "init);" argument, and the   */
/* "} while (0)" lines were missing from this chunk; restored here.    */
# define GC_MALLOC_WORDS_KIND(result,n,tiny_fl,kind,init) \
  do { \
    size_t granules = GC_WORDS_TO_WHOLE_GRANULES(n); \
    GC_FAST_MALLOC_GRANS(result, granules, tiny_fl, 0, kind, \
                         GC_malloc_kind(granules*GC_GRANULE_BYTES, kind), \
                         init); \
  } while (0)
/* Allocate n words of NORMAL (pointer-containing) kind; the first     */
/* word is cleared on the fast path via the init expression.           */
# define GC_MALLOC_WORDS(result,n,tiny_fl) \
        GC_MALLOC_WORDS_KIND(result, n, tiny_fl, GC_I_NORMAL, \
                             *(void **)(result) = 0)
/* Allocate n words of PTRFREE (atomic) kind; no initialization needed */
/* since the collector does not trace such objects.                    */
# define GC_MALLOC_ATOMIC_WORDS(result,n,tiny_fl) \
        GC_MALLOC_WORDS_KIND(result, n, tiny_fl, GC_I_PTRFREE, (void)0)
/* And once more for two word initialized objects: allocate a 2-word   */
/* NORMAL object and store first/second into it, notifying the         */
/* collector of the pointer stores.  result is NULL-checked first.     */
/* NOTE(review): the "do {" and the closing "} / } while (0)" lines    */
/* were missing from this chunk; restored here — confirm vs upstream.  */
# define GC_CONS(result, first, second, tiny_fl) \
  do { \
    void *l = (void *)(first); \
    void *r = (void *)(second); \
    GC_MALLOC_WORDS_KIND(result, 2, tiny_fl, GC_I_NORMAL, (void)0); \
    if ((result) != NULL) { \
      *(void **)(result) = l; \
      ((void **)(result))[1] = r; \
      GC_end_stubborn_change(result); \
      GC_reachable_here(l); \
      GC_reachable_here(r); \
    } \
  } while (0)
204 GC_API void GC_CALL GC_print_free_list(int /* kind */,
205 size_t /* sz_in_granules */);
211 #endif /* !GC_INLINE_H */