struct virgl_renderer_capset_venus data;
} capset;
+ uint32_t shmem_blob_mem;
+
/* note that we use gem_handle instead of res_id to index because
 * res_id is monotonically increasing by default (see
 * virtio_gpu_resource_id_get)
 */
uint32_t res_id;
uint32_t gem_handle = virtgpu_ioctl_resource_create_blob(
- gpu, VIRTGPU_BLOB_MEM_GUEST, VIRTGPU_BLOB_FLAG_USE_MAPPABLE, size, 0,
+ gpu, gpu->shmem_blob_mem, VIRTGPU_BLOB_FLAG_USE_MAPPABLE, size, 0,
&res_id);
if (!gem_handle)
return NULL;
info->vk_ext_command_serialization_spec_version =
capset->vk_ext_command_serialization_spec_version;
info->vk_mesa_venus_protocol_spec_version =
capset->vk_mesa_venus_protocol_spec_version;
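+ /* renderer treats blob_id 0 as a request for a host shmem */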
+ info->supports_blob_id_0 = capset->supports_blob_id_0;
}
static void
vk_free(alloc, gpu);
}
+static void
+virtgpu_init_shmem_blob_mem(struct virtgpu *gpu)
+{
+ /* Blobs of VIRTGPU_BLOB_MEM_GUEST are allocated from guest system memory.
+ * They are logically contiguous in the guest but are sglists (iovecs) in
+ * the host, which makes them slower for the host to process. With host
+ * process isolation, it also becomes impossible for the host to access
+ * sglists directly.
+ *
+ * While there are ideas (and shipped code in some cases) such as creating
+ * udmabufs from sglists, or having a dedicated guest heap, it seems the
+ * easiest way is to reuse VIRTGPU_BLOB_MEM_HOST3D. That is, when the
+ * renderer sees a request to export a blob where
+ *
+ * - blob_mem is VIRTGPU_BLOB_MEM_HOST3D
+ * - blob_flags is VIRTGPU_BLOB_FLAG_USE_MAPPABLE
+ * - blob_id is 0
+ *
+ * it allocates a host shmem.
+ *
+ * TODO cache shmems as they are costly to set up and usually require syncs
+ */
+ gpu->shmem_blob_mem = gpu->capset.data.supports_blob_id_0
+ ? VIRTGPU_BLOB_MEM_HOST3D
+ : VIRTGPU_BLOB_MEM_GUEST;
+}
+
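For reference, a minimal sketch of the renderer-side convention the comment
above describes. All type and helper names here (render_context,
render_blob_request, render_shmem_create, render_lookup_blob) are
hypothetical stand-ins, not the actual virglrenderer API; only the
VIRTGPU_BLOB_* values and the blob_id-0 convention come from the comment:

#include <stdint.h>
#include <drm/virtgpu_drm.h> /* VIRTGPU_BLOB_MEM_*, VIRTGPU_BLOB_FLAG_* */

/* hypothetical renderer-side types, for illustration only */
struct render_context;
struct render_shmem;

struct render_blob_request {
   uint32_t blob_mem;
   uint32_t blob_flags;
   uint64_t blob_id;
   uint64_t size;
};

struct render_shmem *render_shmem_create(struct render_context *ctx,
                                         uint64_t size);
struct render_shmem *render_lookup_blob(struct render_context *ctx,
                                        uint64_t blob_id);

/* blob_id 0 is reserved: a HOST3D + USE_MAPPABLE export with blob_id 0
 * asks the renderer to allocate a fresh host shmem instead of exporting
 * a blob previously created by the driver
 */
static struct render_shmem *
render_export_blob(struct render_context *ctx,
                   const struct render_blob_request *req)
{
   if (req->blob_mem == VIRTGPU_BLOB_MEM_HOST3D &&
       req->blob_flags == VIRTGPU_BLOB_FLAG_USE_MAPPABLE &&
       req->blob_id == 0)
      return render_shmem_create(ctx, req->size);

   return render_lookup_blob(ctx, req->blob_id);
}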
static VkResult
virtgpu_init_context(struct virtgpu *gpu)
{
if (result != VK_SUCCESS)
return result;
+ virtgpu_init_shmem_blob_mem(gpu);
+
gpu->base.ops.destroy = virtgpu_destroy;
gpu->base.ops.get_info = virtgpu_get_info;
gpu->base.ops.submit = virtgpu_submit;
struct virgl_renderer_capset_venus data;
} capset;
+ uint32_t shmem_blob_mem;
+
struct util_sparse_array shmem_array;
struct util_sparse_array bo_array;
};
mtx_lock(&vtest->sock_mutex);
int res_fd;
uint32_t res_id = vtest_vcmd_resource_create_blob(
- vtest, VCMD_BLOB_TYPE_GUEST, VCMD_BLOB_FLAG_MAPPABLE, size, 0, &res_fd);
+ vtest, vtest->shmem_blob_mem, VCMD_BLOB_FLAG_MAPPABLE, size, 0,
+ &res_fd);
assert(res_id > 0 && res_fd >= 0);
mtx_unlock(&vtest->sock_mutex);
info->vk_ext_command_serialization_spec_version =
capset->vk_ext_command_serialization_spec_version;
info->vk_mesa_venus_protocol_spec_version =
capset->vk_mesa_venus_protocol_spec_version;
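+ /* whether blob_id 0 requests a host shmem (see virtgpu_init_shmem_blob_mem) */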
+ info->supports_blob_id_0 = capset->supports_blob_id_0;
}
static void
if (result != VK_SUCCESS)
return result;
+ /* see virtgpu_init_shmem_blob_mem */
+ vtest->shmem_blob_mem = vtest->capset.data.supports_blob_id_0
+ ? VCMD_BLOB_TYPE_HOST3D
+ : VCMD_BLOB_TYPE_GUEST;
+
vtest_vcmd_context_init(vtest, vtest->capset.id);
vtest->base.ops.destroy = vtest_destroy;