bo = bo_from_handle(dev, size, handle);
simple_mtx_unlock(&table_lock);
+ bo->alloc_flags = flags;
bo->max_fences = 1;
bo->fences = &bo->_inline_fence;
 * (MRU, since likely to be in GPU cache), rather than head (LRU).
*/
simple_mtx_lock(&table_lock);
- if (!list_is_empty(&bucket->list)) {
- bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
- /* TODO check for compatible flags? */
- if (fd_bo_state(bo) == FD_BO_STATE_IDLE) {
+ list_for_each_entry (struct fd_bo, entry, &bucket->list, list) {
+ if (fd_bo_state(entry) != FD_BO_STATE_IDLE)
+ break;
+ if (entry->alloc_flags == flags) {
+ bo = entry;
list_del(&bo->list);
- } else {
- bo = NULL;
+ break;
}
}
simple_mtx_unlock(&table_lock);
uint32_t name;
int32_t refcnt;
uint32_t reloc_flags; /* flags like FD_RELOC_DUMP to use for relocs to this BO */
+ uint32_t alloc_flags; /* flags that control allocation/mapping, ie. FD_BO_x */
uint64_t iova;
void *map;
const struct fd_bo_funcs *funcs;