uint32_t begin, end;
};
-static struct pb_buffer *
-amdgpu_bo_create(struct radeon_winsys *rws,
- uint64_t size,
- unsigned alignment,
- enum radeon_bo_domain domain,
- enum radeon_bo_flag flags);
static void amdgpu_bo_unmap(struct pb_buffer *buf);
static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
return true;
}
-static void *amdgpu_bo_map(struct pb_buffer *buf,
- struct radeon_cmdbuf *rcs,
- enum pipe_transfer_usage usage)
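+/* No longer static: the winsys code calls this directly (e.g. when mapping
+ * the IB buffer in amdgpu_ib_new_buffer) instead of going through the
+ * radeon_winsys vtable. */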
+void *amdgpu_bo_map(struct pb_buffer *buf,
+ struct radeon_cmdbuf *rcs,
+ enum pipe_transfer_usage usage)
{
struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
struct amdgpu_winsys_bo *real;
}
assert(slab_size != 0);
- slab->buffer = amdgpu_winsys_bo(amdgpu_bo_create(&ws->base,
+ slab->buffer = amdgpu_winsys_bo(amdgpu_bo_create(ws,
slab_size, slab_size,
domains, flags));
if (!slab->buffer)
bo->base.size - (uint64_t)bo->u.sparse.num_backing_pages * RADEON_SPARSE_PAGE_SIZE);
size = MAX2(size, RADEON_SPARSE_PAGE_SIZE);
- buf = amdgpu_bo_create(&bo->ws->base, size, RADEON_SPARSE_PAGE_SIZE,
+ buf = amdgpu_bo_create(bo->ws, size, RADEON_SPARSE_PAGE_SIZE,
bo->initial_domain,
bo->u.sparse.flags | RADEON_FLAG_NO_SUBALLOC);
if (!buf) {
amdgpu_bo_set_metadata(bo->bo, &metadata);
}
-static struct pb_buffer *
-amdgpu_bo_create(struct radeon_winsys *rws,
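+/* Takes the device-wide amdgpu_winsys directly so internal callers no longer
+ * need a radeon_winsys; amdgpu_buffer_create() below remains the vtable
+ * entry point. */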
+struct pb_buffer *
+amdgpu_bo_create(struct amdgpu_winsys *ws,
uint64_t size,
unsigned alignment,
enum radeon_bo_domain domain,
enum radeon_bo_flag flags)
{
- struct amdgpu_winsys *ws = amdgpu_winsys(rws);
struct amdgpu_winsys_bo *bo;
int heap = -1;
return &bo->base;
}
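+/* radeon_winsys::buffer_create entry point: resolve the radeon_winsys to the
+ * device-wide amdgpu_winsys and forward to amdgpu_bo_create(). */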
+static struct pb_buffer *
+amdgpu_buffer_create(struct radeon_winsys *ws,
+ uint64_t size,
+ unsigned alignment,
+ enum radeon_bo_domain domain,
+ enum radeon_bo_flag flags)
+{
+ return amdgpu_bo_create(amdgpu_winsys(ws), size, alignment, domain,
+ flags);
+}
+
static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
struct winsys_handle *whandle,
unsigned vm_alignment,
return ((struct amdgpu_winsys_bo*)buf)->va;
}
-void amdgpu_bo_init_functions(struct amdgpu_winsys *ws)
+void amdgpu_bo_init_functions(struct amdgpu_screen_winsys *ws)
{
ws->base.buffer_set_metadata = amdgpu_buffer_set_metadata;
ws->base.buffer_get_metadata = amdgpu_buffer_get_metadata;
ws->base.buffer_map = amdgpu_bo_map;
ws->base.buffer_unmap = amdgpu_bo_unmap;
ws->base.buffer_wait = amdgpu_bo_wait;
- ws->base.buffer_create = amdgpu_bo_create;
+ ws->base.buffer_create = amdgpu_buffer_create;
ws->base.buffer_from_handle = amdgpu_bo_from_handle;
ws->base.buffer_from_ptr = amdgpu_bo_from_ptr;
ws->base.buffer_is_user_ptr = amdgpu_bo_is_user_ptr;
return index;
}
-static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws, struct amdgpu_ib *ib,
+static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws,
+ struct amdgpu_ib *ib,
enum ring_type ring_type)
{
struct pb_buffer *pb;
buffer_size = MIN2(buffer_size, max_size);
buffer_size = MAX2(buffer_size, min_size); /* min_size is more important */
- pb = ws->base.buffer_create(&ws->base, buffer_size,
- ws->info.gart_page_size,
- RADEON_DOMAIN_GTT,
- RADEON_FLAG_NO_INTERPROCESS_SHARING |
- (ring_type == RING_GFX ||
- ring_type == RING_COMPUTE ||
- ring_type == RING_DMA ?
- RADEON_FLAG_32BIT | RADEON_FLAG_GTT_WC : 0));
+ pb = amdgpu_bo_create(ws, buffer_size,
+ ws->info.gart_page_size,
+ RADEON_DOMAIN_GTT,
+ RADEON_FLAG_NO_INTERPROCESS_SHARING |
+ (ring_type == RING_GFX ||
+ ring_type == RING_COMPUTE ||
+ ring_type == RING_DMA ?
+ RADEON_FLAG_32BIT | RADEON_FLAG_GTT_WC : 0));
if (!pb)
return false;
- mapped = ws->base.buffer_map(pb, NULL, PIPE_TRANSFER_WRITE);
+ mapped = amdgpu_bo_map(pb, NULL, PIPE_TRANSFER_WRITE);
if (!mapped) {
pb_reference(&pb, NULL);
return false;
}
}
-static bool amdgpu_get_new_ib(struct radeon_winsys *ws, struct amdgpu_cs *cs,
+static bool amdgpu_get_new_ib(struct amdgpu_winsys *ws, struct amdgpu_cs *cs,
enum ib_type ib_type)
{
- struct amdgpu_winsys *aws = amdgpu_winsys(ws);
/* Small IBs are better than big IBs, because the GPU goes idle quicker
* and there is less waiting for buffers and fences. Proof:
* http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
/* Allocate a new buffer for IBs if the current buffer is all used. */
if (!ib->big_ib_buffer ||
ib->used_ib_space + ib_size > ib->big_ib_buffer->size) {
- if (!amdgpu_ib_new_buffer(aws, ib, cs->ring_type))
+ if (!amdgpu_ib_new_buffer(ws, ib, cs->ring_type))
return false;
}
cs->csc = &cs->csc1;
cs->cst = &cs->csc2;
- if (!amdgpu_get_new_ib(&ctx->ws->base, cs, IB_MAIN)) {
+ if (!amdgpu_get_new_ib(ctx->ws, cs, IB_MAIN)) {
amdgpu_destroy_cs_context(&cs->csc2);
amdgpu_destroy_cs_context(&cs->csc1);
FREE(cs);
return NULL;
/* Allocate the compute IB. */
- if (!amdgpu_get_new_ib(&ws->base, cs, IB_PARALLEL_COMPUTE))
+ if (!amdgpu_get_new_ib(ws, cs, IB_PARALLEL_COMPUTE))
return NULL;
if (uses_gds_ordered_append) {
amdgpu_cs_context_cleanup(cs->csc);
}
- amdgpu_get_new_ib(&ws->base, cs, IB_MAIN);
+ amdgpu_get_new_ib(ws, cs, IB_MAIN);
if (cs->compute_ib.ib_mapped)
- amdgpu_get_new_ib(&ws->base, cs, IB_PARALLEL_COMPUTE);
+ amdgpu_get_new_ib(ws, cs, IB_PARALLEL_COMPUTE);
cs->main.base.used_gart = 0;
cs->main.base.used_vram = 0;
return amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo, usage);
}
-void amdgpu_cs_init_functions(struct amdgpu_winsys *ws)
+void amdgpu_cs_init_functions(struct amdgpu_screen_winsys *ws)
{
ws->base.ctx_create = amdgpu_ctx_create;
ws->base.ctx_destroy = amdgpu_ctx_destroy;
#include <xf86drm.h>
#include <stdio.h>
#include <sys/stat.h>
+#include <fcntl.h>
#include "amd/common/ac_llvm_util.h"
#include "amd/common/sid.h"
static void do_winsys_deinit(struct amdgpu_winsys *ws)
{
- AddrDestroy(ws->addrlib);
- amdgpu_device_deinitialize(ws->dev);
-}
-
-static void amdgpu_winsys_destroy(struct radeon_winsys *rws)
-{
- struct amdgpu_winsys *ws = amdgpu_winsys(rws);
-
if (ws->reserve_vmid)
amdgpu_vm_unreserve_vmid(ws->dev, 0);
util_hash_table_destroy(ws->bo_export_table);
simple_mtx_destroy(&ws->global_bo_list_lock);
simple_mtx_destroy(&ws->bo_export_table_lock);
- do_winsys_deinit(ws);
+
+ AddrDestroy(ws->addrlib);
+ amdgpu_device_deinitialize(ws->dev);
+ FREE(ws);
+}
+
+static void amdgpu_winsys_destroy(struct radeon_winsys *rws)
+{
+ struct amdgpu_screen_winsys *sws = amdgpu_screen_winsys(rws);
+ struct amdgpu_winsys *ws = sws->aws;
+ bool destroy;
+
+ /* When the reference counter drops to zero, remove the device pointer
+ * from the table.
+ * This must happen while the mutex is locked, so that
+ * amdgpu_winsys_create in another thread doesn't get the winsys
+ * from the table when the counter drops to 0.
+ */
+ simple_mtx_lock(&dev_tab_mutex);
+
+ destroy = pipe_reference(&ws->reference, NULL);
+ if (destroy && dev_tab) {
+ util_hash_table_remove(dev_tab, ws->dev);
+ if (util_hash_table_count(dev_tab) == 0) {
+ util_hash_table_destroy(dev_tab);
+ dev_tab = NULL;
+ }
+ }
+
+ simple_mtx_unlock(&dev_tab_mutex);
+
+ if (destroy)
+ do_winsys_deinit(ws);
+
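+   /* Close the fd that was duplicated for this screen winsys in
+    * amdgpu_winsys_create(). */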
+ close(sws->fd);
FREE(rws);
}
static bool amdgpu_winsys_unref(struct radeon_winsys *rws)
{
- struct amdgpu_winsys *ws = amdgpu_winsys(rws);
- bool destroy;
-
- /* When the reference counter drops to zero, remove the device pointer
- * from the table.
- * This must happen while the mutex is locked, so that
- * amdgpu_winsys_create in another thread doesn't get the winsys
- * from the table when the counter drops to 0. */
- simple_mtx_lock(&dev_tab_mutex);
-
- destroy = pipe_reference(&ws->reference, NULL);
- if (destroy && dev_tab) {
- util_hash_table_remove(dev_tab, ws->dev);
- if (util_hash_table_count(dev_tab) == 0) {
- util_hash_table_destroy(dev_tab);
- dev_tab = NULL;
- }
- }
-
- simple_mtx_unlock(&dev_tab_mutex);
- return destroy;
+ /* radeon_winsys corresponds to amdgpu_screen_winsys, which is never
+ * referenced multiple times, so amdgpu_winsys_destroy always needs to be
+ * called. It handles reference counting for amdgpu_winsys.
+ */
+ return true;
}
static void amdgpu_pin_threads_to_L3_cache(struct radeon_winsys *rws,
amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
radeon_screen_create_t screen_create)
{
- struct amdgpu_winsys *ws;
+ struct amdgpu_screen_winsys *ws;
+ struct amdgpu_winsys *aws;
amdgpu_device_handle dev;
uint32_t drm_major, drm_minor, r;
+ ws = CALLOC_STRUCT(amdgpu_screen_winsys);
+ if (!ws)
+ return NULL;
+
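+   /* Each screen winsys owns its own duplicate of the DRM fd; it is closed
+    * again in amdgpu_winsys_destroy(). */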
+   ws->fd = fcntl(fd, F_DUPFD_CLOEXEC, 0);
+   if (ws->fd < 0) {
+      FREE(ws);
+      return NULL;
+   }
+
/* Look up the winsys from the dev table. */
simple_mtx_lock(&dev_tab_mutex);
if (!dev_tab)
* for the same fd. */
r = amdgpu_device_initialize(fd, &drm_major, &drm_minor, &dev);
if (r) {
- simple_mtx_unlock(&dev_tab_mutex);
fprintf(stderr, "amdgpu: amdgpu_device_initialize failed.\n");
- return NULL;
+ goto fail;
}
/* Lookup a winsys if we have already created one for this device. */
- ws = util_hash_table_get(dev_tab, dev);
- if (ws) {
- pipe_reference(NULL, &ws->reference);
+ aws = util_hash_table_get(dev_tab, dev);
+ if (aws) {
+ pipe_reference(NULL, &aws->reference);
simple_mtx_unlock(&dev_tab_mutex);
/* Release the device handle, because we don't need it anymore.
* has its own device handle.
*/
amdgpu_device_deinitialize(dev);
- return &ws->base;
- }
+ } else {
+ /* Create a new winsys. */
+ aws = CALLOC_STRUCT(amdgpu_winsys);
+ if (!aws)
+ goto fail;
+
+      aws->dev = dev;
+      aws->info.drm_major = drm_major;
+      aws->info.drm_minor = drm_minor;
+
+      /* Set this now, so that amdgpu_winsys_destroy() in the error paths
+       * below can reach the device winsys. */
+      ws->aws = aws;
+
+      if (!do_winsys_init(aws, config, fd))
+         goto fail_alloc;
+
+ /* Create managers. */
+ pb_cache_init(&aws->bo_cache, RADEON_MAX_CACHED_HEAPS,
+ 500000, aws->check_vm ? 1.0f : 2.0f, 0,
+ (aws->info.vram_size + aws->info.gart_size) / 8,
+ amdgpu_bo_destroy, amdgpu_bo_can_reclaim);
+
+ unsigned min_slab_order = 9; /* 512 bytes */
+ unsigned max_slab_order = 18; /* 256 KB - higher numbers increase memory usage */
+ unsigned num_slab_orders_per_allocator = (max_slab_order - min_slab_order) /
+ NUM_SLAB_ALLOCATORS;
+
+ /* Divide the size order range among slab managers. */
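+      /* With e.g. NUM_SLAB_ALLOCATORS == 3 this gives the order ranges
+       * [9,12], [13,16] and [17,18]. */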
+ for (unsigned i = 0; i < NUM_SLAB_ALLOCATORS; i++) {
+ unsigned min_order = min_slab_order;
+ unsigned max_order = MIN2(min_order + num_slab_orders_per_allocator,
+ max_slab_order);
+
+ if (!pb_slabs_init(&aws->bo_slabs[i],
+ min_order, max_order,
+ RADEON_MAX_SLAB_HEAPS,
+ aws,
+ amdgpu_bo_can_reclaim_slab,
+ amdgpu_bo_slab_alloc,
+ amdgpu_bo_slab_free)) {
+            /* Unlock first: amdgpu_winsys_destroy() itself takes dev_tab_mutex. */
+            simple_mtx_unlock(&dev_tab_mutex);
+            amdgpu_winsys_destroy(&ws->base);
+ return NULL;
+ }
- /* Create a new winsys. */
- ws = CALLOC_STRUCT(amdgpu_winsys);
- if (!ws)
- goto fail;
+ min_slab_order = max_order + 1;
+ }
- ws->dev = dev;
- ws->info.drm_major = drm_major;
- ws->info.drm_minor = drm_minor;
+ aws->info.min_alloc_size = 1 << aws->bo_slabs[0].min_order;
- if (!do_winsys_init(ws, config, fd))
- goto fail_alloc;
+ /* init reference */
+ pipe_reference_init(&aws->reference, 1);
- /* Create managers. */
- pb_cache_init(&ws->bo_cache, RADEON_MAX_CACHED_HEAPS,
- 500000, ws->check_vm ? 1.0f : 2.0f, 0,
- (ws->info.vram_size + ws->info.gart_size) / 8,
- amdgpu_bo_destroy, amdgpu_bo_can_reclaim);
+ LIST_INITHEAD(&aws->global_bo_list);
+ aws->bo_export_table = util_hash_table_create(hash_pointer, compare_pointers);
- unsigned min_slab_order = 9; /* 512 bytes */
- unsigned max_slab_order = 18; /* 256 KB - higher numbers increase memory usage */
- unsigned num_slab_orders_per_allocator = (max_slab_order - min_slab_order) /
- NUM_SLAB_ALLOCATORS;
+ (void) simple_mtx_init(&aws->global_bo_list_lock, mtx_plain);
+ (void) simple_mtx_init(&aws->bo_fence_lock, mtx_plain);
+ (void) simple_mtx_init(&aws->bo_export_table_lock, mtx_plain);
- /* Divide the size order range among slab managers. */
- for (unsigned i = 0; i < NUM_SLAB_ALLOCATORS; i++) {
- unsigned min_order = min_slab_order;
- unsigned max_order = MIN2(min_order + num_slab_orders_per_allocator,
- max_slab_order);
-
- if (!pb_slabs_init(&ws->bo_slabs[i],
- min_order, max_order,
- RADEON_MAX_SLAB_HEAPS,
- ws,
- amdgpu_bo_can_reclaim_slab,
- amdgpu_bo_slab_alloc,
- amdgpu_bo_slab_free)) {
+ if (!util_queue_init(&aws->cs_queue, "cs", 8, 1,
+ UTIL_QUEUE_INIT_RESIZE_IF_FULL)) {
-      amdgpu_winsys_destroy(&ws->base);
       simple_mtx_unlock(&dev_tab_mutex);
+      amdgpu_winsys_destroy(&ws->base);
return NULL;
}
- min_slab_order = max_order + 1;
- }
+ util_hash_table_set(dev_tab, dev, aws);
- ws->info.min_alloc_size = 1 << ws->bo_slabs[0].min_order;
+ if (aws->reserve_vmid) {
+ r = amdgpu_vm_reserve_vmid(dev, 0);
+ if (r) {
+         fprintf(stderr, "amdgpu: amdgpu_vm_reserve_vmid failed. (%i)\n", r);
+         simple_mtx_unlock(&dev_tab_mutex);
+         amdgpu_winsys_destroy(&ws->base);
+ return NULL;
+ }
+ }
+ }
- /* init reference */
- pipe_reference_init(&ws->reference, 1);
+ ws->aws = aws;
/* Set functions. */
ws->base.unref = amdgpu_winsys_unref;
amdgpu_cs_init_functions(ws);
amdgpu_surface_init_functions(ws);
- LIST_INITHEAD(&ws->global_bo_list);
- ws->bo_export_table = util_hash_table_create(hash_pointer, compare_pointers);
-
- (void) simple_mtx_init(&ws->global_bo_list_lock, mtx_plain);
- (void) simple_mtx_init(&ws->bo_fence_lock, mtx_plain);
- (void) simple_mtx_init(&ws->bo_export_table_lock, mtx_plain);
-
- if (!util_queue_init(&ws->cs_queue, "cs", 8, 1,
- UTIL_QUEUE_INIT_RESIZE_IF_FULL)) {
- amdgpu_winsys_destroy(&ws->base);
- simple_mtx_unlock(&dev_tab_mutex);
- return NULL;
- }
-
/* Create the screen at the end. The winsys must be initialized
* completely.
*
return NULL;
}
- util_hash_table_set(dev_tab, dev, ws);
-
- if (ws->reserve_vmid) {
- r = amdgpu_vm_reserve_vmid(dev, 0);
- if (r) {
- fprintf(stderr, "amdgpu: amdgpu_vm_reserve_vmid failed. (%i)\n", r);
- goto fail_cache;
- }
- }
-
/* We must unlock the mutex once the winsys is fully initialized, so that
* other threads attempting to create the winsys from the same fd will
* get a fully initialized winsys and not just half-way initialized. */
return &ws->base;
-fail_cache:
- pb_cache_deinit(&ws->bo_cache);
- do_winsys_deinit(ws);
fail_alloc:
- FREE(ws);
+ FREE(aws);
fail:
+ close(ws->fd);
+ FREE(ws);
simple_mtx_unlock(&dev_tab_mutex);
return NULL;
}