/* found, incr refcnt and return: */
bo = fd_bo_ref(entry->data);
- if (!list_is_empty(&bo->list)) {
+ if (!list_is_empty(&bo->node)) {
mesa_logw("bo was in cache, size=%u, alloc_flags=0x%x\n",
bo->size, bo->alloc_flags);
}
/* don't break the bucket if this bo was found in one */
- list_delinit(&bo->list);
+ list_delinit(&bo->node);
}
return bo;
}
bo->reloc_flags = FD_RELOC_FLAGS_INIT;
p_atomic_set(&bo->refcnt, 1);
- list_inithead(&bo->list);
+ list_inithead(&bo->node);
}
/* allocate a new buffer object, call w/ table_lock held */
static void
bo_remove_from_bucket(struct fd_bo_bucket *bucket, struct fd_bo *bo)
{
- list_delinit(&bo->list);
+ list_delinit(&bo->node);
bucket->count--;
}
struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
if (bucket->count > 0) {
- struct fd_bo *bo = list_first_entry(&bucket->list, struct fd_bo, list);
+ struct fd_bo *bo = first_bo(&bucket->list);
if (fd_bo_state(bo) == FD_BO_STATE_IDLE)
state = " (idle)";
}
struct fd_bo *bo;
while (!list_is_empty(&bucket->list)) {
- bo = list_entry(bucket->list.next, struct fd_bo, list);
+ bo = first_bo(&bucket->list);
/* keep things in cache for at least 1 second: */
if (time && ((time - bo->free_time) <= 1))
* (MRU, since likely to be in GPU cache), rather than head (LRU)..
*/
simple_mtx_lock(&table_lock);
- list_for_each_entry (struct fd_bo, entry, &bucket->list, list) {
+ foreach_bo (entry, &bucket->list) {
if (fd_bo_state(entry) != FD_BO_STATE_IDLE) {
break;
}
bo->free_time = time.tv_sec;
VG_BO_RELEASE(bo);
- list_addtail(&bo->list, &bucket->list);
+ list_addtail(&bo->node, &bucket->list);
bucket->count++;
*/
uint32_t idx;
- struct list_head list; /* bucket-list entry */
+ struct list_head node; /* bucket-list entry */
time_t free_time; /* time when added to bucket-list */
unsigned short nr_fences, max_fences;
#define last_submit(list) \
list_last_entry(list, struct fd_submit, node)
+#define foreach_bo(name, list) \
+ list_for_each_entry(struct fd_bo, name, list, node)
+#define foreach_bo_safe(name, list) \
+ list_for_each_entry_safe(struct fd_bo, name, list, node)
+#define first_bo(list) \
+ list_first_entry(list, struct fd_bo, node)
+
void fd_bo_cache_init(struct fd_bo_cache *cache, int coarse, const char *name);
void fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time);
struct fd_bo *fd_bo_cache_alloc(struct fd_bo_cache *cache, uint32_t *size,
* latency hit of waiting for the host to catch up.
*/
simple_mtx_lock(&virtio_dev->eb_lock);
- list_addtail(&bo->list, &virtio_dev->prealloc_list);
- bo = list_first_entry(&virtio_dev->prealloc_list, struct fd_bo, list);
- list_delinit(&bo->list);
+ list_addtail(&bo->node, &virtio_dev->prealloc_list);
+ bo = first_bo(&virtio_dev->prealloc_list);
+ list_delinit(&bo->node);
simple_mtx_unlock(&virtio_dev->eb_lock);
}
struct fd_bo *bo = virtio_bo_new_impl(dev, SUBALLOC_SIZE, RING_FLAGS);
if (!bo)
break;
- list_addtail(&bo->list, &virtio_dev->prealloc_list);
+ list_addtail(&bo->node, &virtio_dev->prealloc_list);
}
}