sync->state = initial_value ? NVK_BO_SYNC_STATE_SIGNALED :
NVK_BO_SYNC_STATE_RESET;
- sync->bo = nouveau_ws_bo_new(dev->pdev->dev, 0x1000, 0,
+ sync->bo = nouveau_ws_bo_new(dev->ws_dev, 0x1000, 0,
NOUVEAU_WS_BO_GART);
if (!sync->bo)
return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);
return vk_error(pool, VK_ERROR_OUT_OF_HOST_MEMORY);
uint32_t flags = NOUVEAU_WS_BO_GART | NOUVEAU_WS_BO_MAP;
- bo->bo = nouveau_ws_bo_new_mapped(dev->pdev->dev, NVK_CMD_BO_SIZE, 0,
+ bo->bo = nouveau_ws_bo_new_mapped(dev->ws_dev, NVK_CMD_BO_SIZE, 0,
flags, NOUVEAU_WS_BO_WR, &bo->map);
if (bo->bo == NULL) {
vk_free(&pool->vk.alloc, bo);
if (bo_size) {
uint32_t flags = NOUVEAU_WS_BO_GART | NOUVEAU_WS_BO_MAP;
- pool->bo = nouveau_ws_bo_new(device->pdev->dev, bo_size, 0, flags);
+ pool->bo = nouveau_ws_bo_new(device->ws_dev, bo_size, 0, flags);
if (!pool->bo) {
nvk_destroy_descriptor_pool(device, pAllocator, pool);
return vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
struct nvk_descriptor_table *table,
uint32_t new_alloc)
{
- struct nvk_physical_device *pdev = nvk_device_physical(dev);
struct nouveau_ws_bo *new_bo;
void *new_map;
uint32_t *new_free_table;
assert(new_alloc > table->alloc && new_alloc <= table->max_alloc);
const uint32_t new_bo_size = new_alloc * table->desc_size;
- new_bo = nouveau_ws_bo_new(pdev->dev, new_bo_size, 256,
+ new_bo = nouveau_ws_bo_new(dev->ws_dev, new_bo_size, 256,
NOUVEAU_WS_BO_LOCAL | NOUVEAU_WS_BO_MAP);
if (new_bo == NULL) {
return vk_errorf(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY,
if (likely(bytes_per_mp <= area->bytes_per_mp))
return VK_SUCCESS;
- uint64_t size = bytes_per_mp * dev->pdev->dev->mp_count;
+ uint64_t size = bytes_per_mp * dev->ws_dev->mp_count;
/* The hardware seems to require this alignment for
* NV9097_SET_SHADER_LOCAL_MEMORY_D_SIZE_LOWER.
*/
size = ALIGN(size, 0x20000);
struct nouveau_ws_bo *bo =
- nouveau_ws_bo_new(dev->pdev->dev, size, 0, NOUVEAU_WS_BO_LOCAL);
+ nouveau_ws_bo_new(dev->ws_dev, size, 0, NOUVEAU_WS_BO_LOCAL);
if (bo == NULL)
return vk_error(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY);
if (result != VK_SUCCESS)
goto fail_alloc;
- vk_device_set_drm_fd(&dev->vk, pdev->dev->fd);
+ vk_device_set_drm_fd(&dev->vk, pdev->ws_dev->fd);
dev->vk.command_buffer_ops = &nvk_cmd_buffer_ops;
dev->pdev = pdev;
+ dev->ws_dev = pdev->ws_dev;
- int ret = nouveau_ws_context_create(pdev->dev, &dev->ws_ctx);
+ int ret = nouveau_ws_context_create(dev->ws_dev, &dev->ws_ctx);
if (ret) {
if (ret == -ENOSPC)
result = vk_error(dev, VK_ERROR_TOO_MANY_OBJECTS);
pthread_condattr_destroy(&condattr);
void *zero_map;
- dev->zero_page = nouveau_ws_bo_new_mapped(dev->pdev->dev, 0x1000, 0,
- NOUVEAU_WS_BO_LOCAL,
- NOUVEAU_WS_BO_WR, &zero_map);
+ dev->zero_page = nouveau_ws_bo_new_mapped(dev->ws_dev, 0x1000, 0,
+ NOUVEAU_WS_BO_LOCAL,
+ NOUVEAU_WS_BO_WR, &zero_map);
if (dev->zero_page == NULL)
goto fail_queue_submit;
if (dev->pdev->info.cls_eng3d >= FERMI_A &&
dev->pdev->info.cls_eng3d < MAXWELL_A) {
/* max size is 256k */
- dev->vab_memory = nouveau_ws_bo_new(dev->pdev->dev, 1 << 17, 1 << 20,
+ dev->vab_memory = nouveau_ws_bo_new(dev->ws_dev, 1 << 17, 1 << 20,
NOUVEAU_WS_BO_LOCAL);
if (dev->vab_memory == NULL)
goto fail_zero_page;
struct vk_device vk;
struct nvk_physical_device *pdev;
+ struct nouveau_ws_device *ws_dev;
struct nouveau_ws_context *ws_ctx;
/* Protected by nvk_device::mutex */
switch (handleType) {
case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
- bo = nouveau_ws_bo_from_dma_buf(pdev->dev, fd);
+ bo = nouveau_ws_bo_from_dma_buf(dev->ws_dev, fd);
if (bo == NULL)
return vk_error(dev, VK_ERROR_INVALID_EXTERNAL_HANDLE);
break;
fd_info->handleType ==
VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
- mem->bo = nouveau_ws_bo_from_dma_buf(pdev->dev, fd_info->fd);
+ mem->bo = nouveau_ws_bo_from_dma_buf(dev->ws_dev, fd_info->fd);
if (mem->bo == NULL) {
result = vk_error(dev, VK_ERROR_INVALID_EXTERNAL_HANDLE);
goto fail_alloc;
}
assert(!(flags & ~mem->bo->flags));
} else if (tile_info) {
- mem->bo = nouveau_ws_bo_new_tiled(pdev->dev,
+ mem->bo = nouveau_ws_bo_new_tiled(dev->ws_dev,
pAllocateInfo->allocationSize, 0,
tile_info->pte_kind,
tile_info->tile_mode,
goto fail_alloc;
}
} else {
- mem->bo = nouveau_ws_bo_new(pdev->dev, aligned_size, alignment, flags);
+ mem->bo = nouveau_ws_bo_new(dev->ws_dev, aligned_size, alignment, flags);
if (!mem->bo) {
result = vk_error(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY);
goto fail_alloc;
}
}
- if (pdev->dev->debug_flags & NVK_DEBUG_ZERO_MEMORY) {
+ if (dev->ws_dev->debug_flags & NVK_DEBUG_ZERO_MEMORY) {
if (type->propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
void *map = nouveau_ws_bo_map(mem->bo, NOUVEAU_WS_BO_RDWR);
if (map == NULL) {
void *new_bo_map;
struct nouveau_ws_bo *new_bo =
- nouveau_ws_bo_new_mapped(dev->pdev->dev,
+ nouveau_ws_bo_new_mapped(dev->ws_dev,
new_bo_size + heap->overalloc, 0,
heap->bo_flags, heap->map_flags,
&new_bo_map);
NVK_HEAP_MIN_SIZE << (MAX2(heap->bo_count, 1) - 1);
heap->bos[heap->bo_count].bo =
- nouveau_ws_bo_new_mapped(dev->pdev->dev,
+ nouveau_ws_bo_new_mapped(dev->ws_dev,
new_bo_size + heap->overalloc, 0,
heap->bo_flags, heap->map_flags,
&heap->bos[heap->bo_count].map);
return VK_ERROR_INCOMPATIBLE_DRIVER;
}
- struct nouveau_ws_device *ndev = nouveau_ws_device_new(drm_device);
- if (!ndev)
+ struct nouveau_ws_device *ws_dev = nouveau_ws_device_new(drm_device);
+ if (!ws_dev)
return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
vk_warn_non_conformant_implementation("NVK");
&dispatch_table, &wsi_physical_device_entrypoints, false);
struct vk_device_extension_table supported_extensions;
- nvk_get_device_extensions(&ndev->info, &supported_extensions);
+ nvk_get_device_extensions(&ws_dev->info, &supported_extensions);
struct vk_features supported_features;
- nvk_get_device_features(&ndev->info, &supported_features);
+ nvk_get_device_features(&ws_dev->info, &supported_features);
result = vk_physical_device_init(&pdev->vk, &instance->vk,
&supported_extensions,
if (result != VK_SUCCESS)
goto fail_alloc;
- pdev->dev = ndev;
- pdev->info = ndev->info;
+ pdev->ws_dev = ws_dev;
+ pdev->info = ws_dev->info;
const struct {
uint16_t vendor_id;
fail_alloc:
vk_free(&instance->vk.alloc, pdev);
fail_dev_alloc:
- nouveau_ws_device_destroy(ndev);
+ nouveau_ws_device_destroy(ws_dev);
return result;
}
container_of(vk_pdev, struct nvk_physical_device, vk);
nvk_finish_wsi(pdev);
- nouveau_ws_device_destroy(pdev->dev);
+ nouveau_ws_device_destroy(pdev->ws_dev);
vk_physical_device_finish(&pdev->vk);
vk_free(&pdev->vk.instance->alloc, pdev);
}
struct nvk_physical_device {
struct vk_physical_device vk;
- struct nouveau_ws_device *dev;
+ struct nouveau_ws_device *ws_dev;
struct nv_device_info info;
struct wsi_device wsi_device;
if (pool->vk.query_count > 0) {
uint32_t bo_size = pool->query_start +
pool->query_stride * pool->vk.query_count;
- pool->bo = nouveau_ws_bo_new_mapped(dev->pdev->dev, bo_size, 0,
+ pool->bo = nouveau_ws_bo_new_mapped(dev->ws_dev, bo_size, 0,
NOUVEAU_WS_BO_GART,
NOUVEAU_WS_BO_RDWR,
&pool->bo_map);
return vk_error(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY);
}
- if (dev->pdev->dev->debug_flags & NVK_DEBUG_ZERO_MEMORY)
+ if (dev->ws_dev->debug_flags & NVK_DEBUG_ZERO_MEMORY)
memset(pool->bo_map, 0, bo_size);
}
struct nouveau_ws_bo *push_bo;
void *push_map;
- push_bo = nouveau_ws_bo_new_mapped(dev->pdev->dev, 256 * 4, 0,
+ push_bo = nouveau_ws_bo_new_mapped(dev->ws_dev, 256 * 4, 0,
NOUVEAU_WS_BO_GART | NOUVEAU_WS_BO_MAP,
NOUVEAU_WS_BO_WR, &push_map);
if (push_bo == NULL)
"pointers pushbuf");
}
- const bool sync = dev->pdev->dev->debug_flags & NVK_DEBUG_PUSH_SYNC;
+ const bool sync = dev->ws_dev->debug_flags & NVK_DEBUG_PUSH_SYNC;
result = nvk_queue_submit_drm_nouveau(queue, submit, sync);
if ((sync && result != VK_SUCCESS) ||
- (dev->pdev->dev->debug_flags & NVK_DEBUG_PUSH_DUMP)) {
+ (dev->ws_dev->debug_flags & NVK_DEBUG_PUSH_DUMP)) {
nvk_queue_state_dump_push(dev, &queue->state, stderr);
for (unsigned i = 0; i < submit->command_buffer_count; i++) {
queue->vk.driver_submit = nvk_queue_submit;
void *empty_push_map;
- queue->empty_push = nouveau_ws_bo_new_mapped(dev->pdev->dev, 4096, 0,
+ queue->empty_push = nouveau_ws_bo_new_mapped(dev->ws_dev, 4096, 0,
NOUVEAU_WS_BO_GART |
NOUVEAU_WS_BO_MAP,
NOUVEAU_WS_BO_WR,
return VK_ERROR_DEVICE_LOST;
void *push_map;
- push_bo = nouveau_ws_bo_new_mapped(dev->pdev->dev, dw_count * 4, 0,
+ push_bo = nouveau_ws_bo_new_mapped(dev->ws_dev, dw_count * 4, 0,
NOUVEAU_WS_BO_GART | NOUVEAU_WS_BO_MAP,
NOUVEAU_WS_BO_WR, &push_map);
if (push_bo == NULL)
memcpy(push_map, dw, dw_count * 4);
- const bool debug_sync = dev->pdev->dev->debug_flags & NVK_DEBUG_PUSH_SYNC;
+ const bool debug_sync = dev->ws_dev->debug_flags & NVK_DEBUG_PUSH_SYNC;
result = nvk_queue_submit_simple_drm_nouveau(queue, dw_count, push_bo,
extra_bo_count, extra_bos,
sync || debug_sync);
if ((debug_sync && result != VK_SUCCESS) ||
- (dev->pdev->dev->debug_flags & NVK_DEBUG_PUSH_DUMP)) {
+ (dev->ws_dev->debug_flags & NVK_DEBUG_PUSH_DUMP)) {
struct nv_push push = {
.start = (uint32_t *)dw,
.end = (uint32_t *)dw + dw_count,
{
const uint32_t domain = (bo->flags & NOUVEAU_WS_BO_GART) ?
NOUVEAU_GEM_DOMAIN_GART :
- pb->dev->pdev->dev->local_mem_domain;
+ pb->dev->ws_dev->local_mem_domain;
for (uint32_t i = 0; i < pb->req.nr_buffers; i++) {
if (pb->req_bo[i].handle == bo->handle) {
static VkResult
push_submit(struct push_builder *pb, struct nvk_queue *queue, bool sync)
{
- int err = drmCommandWriteRead(pb->dev->pdev->dev->fd,
+ int err = drmCommandWriteRead(pb->dev->ws_dev->fd,
DRM_NOUVEAU_GEM_PUSHBUF,
&pb->req, sizeof(pb->req));
if (err) {
struct drm_nouveau_gem_cpu_prep req = {};
req.handle = pb->req_bo[0].handle;
req.flags = NOUVEAU_GEM_CPU_PREP_WRITE;
- err = drmCommandWrite(pb->dev->pdev->dev->fd,
+ err = drmCommandWrite(pb->dev->ws_dev->fd,
DRM_NOUVEAU_GEM_CPU_PREP,
&req, sizeof(req));
if (err) {