/**************************************************************************
 *
 * Copyright 2008 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * Improved cache implementation.
 *
 * Fixed size array with linear probing on collision and LRU eviction
 * on full.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */
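
/*
 * Typical usage (an illustrative sketch; the my_hash, my_compare and
 * my_destroy callbacks are hypothetical and must be supplied by the caller):
 *
 *    struct util_cache *cache;
 *
 *    cache = util_cache_create(my_hash, my_compare, my_destroy, 32);
 *    util_cache_set(cache, key, value);
 *    value = util_cache_get(cache, key);    -- NULL if the key is absent
 *    util_cache_remove(cache, key);
 *    util_cache_destroy(cache);             -- destroys all remaining pairs
 */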

#include "pipe/p_compiler.h"
#include "util/u_debug.h"

#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/u_cache.h"
#include "util/u_simple_list.h"

struct util_cache_entry
{
   enum { EMPTY = 0, FILLED, DELETED } state;
   uint32_t hash;

   struct util_cache_entry *next;
   struct util_cache_entry *prev;

   void *key;
   void *value;

#ifdef DEBUG
   /** Number of times this slot has been (re)filled, for hash quality stats */
   unsigned count;
#endif
};

struct util_cache
{
   /** Hash function */
   uint32_t (*hash)(const void *key);

   /** Compare two keys */
   int (*compare)(const void *key1, const void *key2);

   /** Destroy a (key, value) pair */
   void (*destroy)(void *key, void *value);

   /** Number of entry slots (requested size times CACHE_DEFAULT_ALPHA) */
   uint32_t size;

   struct util_cache_entry *entries;

   /** Number of FILLED entries */
   unsigned count;

   /** Head of the LRU-ordered list of FILLED entries */
   struct util_cache_entry lru;
};

static void
ensure_sanity(const struct util_cache *cache);
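
/*
 * Over-allocation factor: util_cache_create() allocates CACHE_DEFAULT_ALPHA
 * times the requested number of entries, while util_cache_set() starts
 * evicting once size / CACHE_DEFAULT_ALPHA entries are FILLED, so the load
 * factor never exceeds 1 / CACHE_DEFAULT_ALPHA and linear probes stay short.
 */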
#define CACHE_DEFAULT_ALPHA 2

struct util_cache *
util_cache_create(uint32_t (*hash)(const void *key),
                  int (*compare)(const void *key1, const void *key2),
                  void (*destroy)(void *key, void *value),
                  uint32_t size)
{
   struct util_cache *cache;

   cache = CALLOC_STRUCT(util_cache);
   if (!cache)
      return NULL;

   cache->hash = hash;
   cache->compare = compare;
   cache->destroy = destroy;

   make_empty_list(&cache->lru);

   size *= CACHE_DEFAULT_ALPHA;
   cache->size = size;

   cache->entries = CALLOC(size, sizeof(struct util_cache_entry));
   if (!cache->entries) {
      FREE(cache);
      return NULL;
   }

   ensure_sanity(cache);

   return cache;
}

static struct util_cache_entry *
util_cache_entry_get(struct util_cache *cache,
                     uint32_t hash,
                     const void *key)
{
   struct util_cache_entry *first_unfilled = NULL;
   uint32_t index = hash % cache->size;
   uint32_t probe;

   /* Probe until we find either a matching FILLED entry or an EMPTY
    * slot (which has never been occupied).
    *
    * Deleted or non-matching slots are not indicative of completion
    * as a previous linear probe for the same key could have continued
    * further on.
    */
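   /* Illustrative example (hypothetical keys, for exposition only): if keys
    * A and B both hash to slot 3 and B ended up in slot 4 by linear probing,
    * removing A leaves slot 3 DELETED; a later probe for B must skip the
    * DELETED slot and only stop at a matching entry or an EMPTY slot.
    */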
   for (probe = 0; probe < cache->size; probe++) {
      uint32_t i = (index + probe) % cache->size;
      struct util_cache_entry *current = &cache->entries[i];

      if (current->state == FILLED) {
         if (current->hash == hash &&
             cache->compare(key, current->key) == 0)
            return current;
      }
      else {
         if (!first_unfilled)
            first_unfilled = current;

         if (current->state == EMPTY)
            return first_unfilled;
      }
   }

   return first_unfilled;
}

static INLINE void
util_cache_entry_destroy(struct util_cache *cache,
                         struct util_cache_entry *entry)
{
   void *key = entry->key;
   void *value = entry->value;

   entry->key = NULL;
   entry->value = NULL;

   if (entry->state == FILLED) {
      remove_from_list(entry);
      cache->count--;

      if (cache->destroy)
         cache->destroy(key, value);

      entry->state = DELETED;
   }
}

/** Add a (key, value) pair to the cache, evicting the LRU entry if full */
void
util_cache_set(struct util_cache *cache,
               void *key,
               void *value)
{
   struct util_cache_entry *entry;
   uint32_t hash;

   assert(cache);
   if (!cache)
      return;

   hash = cache->hash(key);

   entry = util_cache_entry_get(cache, hash, key);
   if (!entry)
      entry = cache->lru.prev;

   if (cache->count >= cache->size / CACHE_DEFAULT_ALPHA)
      util_cache_entry_destroy(cache, cache->lru.prev);

   util_cache_entry_destroy(cache, entry);

#ifdef DEBUG
   ++entry->count;
#endif

   entry->key = key;
   entry->hash = hash;
   entry->value = value;
   entry->state = FILLED;
   insert_at_head(&cache->lru, entry);
   cache->count++;

   ensure_sanity(cache);
}

/** Look up a key, returning its value (or NULL) and marking it most recently used */
void *
util_cache_get(struct util_cache *cache,
               const void *key)
{
   struct util_cache_entry *entry;
   uint32_t hash;

   assert(cache);
   if (!cache)
      return NULL;

   hash = cache->hash(key);

   entry = util_cache_entry_get(cache, hash, key);
   if (!entry)
      return NULL;

   if (entry->state == FILLED)
      move_to_head(&cache->lru, entry);

   return entry->value;
}

/** Remove all entries, destroying each remaining (key, value) pair */
void
util_cache_clear(struct util_cache *cache)
{
   uint32_t i;

   assert(cache);
   if (!cache)
      return;

   for (i = 0; i < cache->size; ++i) {
      util_cache_entry_destroy(cache, &cache->entries[i]);
      cache->entries[i].state = EMPTY;
   }

   assert(cache->count == 0);
   assert(is_empty_list(&cache->lru));
   ensure_sanity(cache);
}

/** Destroy the cache and all remaining entries */
void
util_cache_destroy(struct util_cache *cache)
{
   assert(cache);
   if (!cache)
      return;

#ifdef DEBUG
   if (cache->count >= 20*cache->size) {
      /* Normal approximation of the Poisson distribution */
      double mean = (double)cache->count/(double)cache->size;
      double stddev = sqrt(mean);
      unsigned i;

      for (i = 0; i < cache->size; ++i) {
         double z = fabs(cache->entries[i].count - mean)/stddev;
         /* This assert should not fail 99.9999998027% of the times, unless
          * the hash function is a poor one: a two-sided deviation of more
          * than six standard deviations has probability of roughly 2e-9
          * under the normal approximation. */
         assert(z <= 6.0);
      }
   }
#endif

   util_cache_clear(cache);

   FREE(cache->entries);
   FREE(cache);
}

/** Remove the cache entry which matches the given key, if present */
void
util_cache_remove(struct util_cache *cache,
                  const void *key)
{
   struct util_cache_entry *entry;
   uint32_t hash;

   assert(cache);
   if (!cache)
      return;

   hash = cache->hash(key);

   entry = util_cache_entry_get(cache, hash, key);
   if (!entry)
      return;

   if (entry->state == FILLED)
      util_cache_entry_destroy(cache, entry);

   ensure_sanity(cache);
}

/** Debug-only consistency check of the table and the LRU list */
static void
ensure_sanity(const struct util_cache *cache)
{
#ifdef DEBUG
   unsigned i, cnt = 0;

   assert(cache);
   for (i = 0; i < cache->size; i++) {
      struct util_cache_entry *header = &cache->entries[i];

      assert(header);
      assert(header->state == FILLED ||
             header->state == EMPTY ||
             header->state == DELETED);
      if (header->state == FILLED) {
         cnt++;
         assert(header->hash == cache->hash(header->key));
      }
   }

   assert(cnt == cache->count);
   assert(cache->size >= cnt);

   if (cache->count == 0) {
      assert(is_empty_list(&cache->lru));
   }
   else {
      struct util_cache_entry *header = cache->lru.next;

      assert(header);
      assert(!is_empty_list(&cache->lru));

      /* Walking 'count' links from the first entry must land back on the
       * list head. */
      for (i = 0; i < cache->count; i++)
         header = header->next;

      assert(header == &cache->lru);
   }
#endif

   (void)cache;
}