* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <libsync.h>
#include "pipe/p_shader_tokens.h"
#include "pipe/p_context.h"
}
-static void virgl_flush_eq(struct virgl_context *ctx, void *closure)
+static void virgl_flush_eq(struct virgl_context *ctx, void *closure,
+ struct pipe_fence_handle **fence)
{
struct virgl_screen *rs = virgl_screen(ctx->base.screen);
+ int out_fence_fd = -1;
/* send the buffer to the remote side for decoding */
ctx->num_transfers = ctx->num_draws = 0;
- rs->vws->submit_cmd(rs->vws, ctx->cbuf);
+
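+ /* pass any accumulated in-fence to the kernel; request an out-fence
+  * only when the state tracker asked for one via PIPE_FLUSH_FENCE_FD */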
+ rs->vws->submit_cmd(rs->vws, ctx->cbuf, ctx->cbuf->in_fence_fd,
+ ctx->cbuf->needs_out_fence_fd ? &out_fence_fd : NULL);
+
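+ /* wrap the exported fence fd (or -1) in a winsys fence handle */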
+ if (fence)
+ *fence = rs->vws->cs_create_fence(rs->vws, out_fence_fd);
virgl_encoder_set_sub_ctx(ctx, ctx->hw_sub_ctx_id);
enum pipe_flush_flags flags)
{
struct virgl_context *vctx = virgl_context(ctx);
- struct virgl_screen *rs = virgl_screen(ctx->screen);
struct virgl_buffer *buf, *tmp;
- if (fence)
- *fence = rs->vws->cs_create_fence(rs->vws);
+ if (flags & PIPE_FLUSH_FENCE_FD)
+ vctx->cbuf->needs_out_fence_fd = true;
LIST_FOR_EACH_ENTRY_SAFE(buf, tmp, &vctx->to_flush_bufs, flush_list) {
struct pipe_resource *res = &buf->base.u.b;
pipe_resource_reference(&res, NULL);
}
- virgl_flush_eq(vctx, vctx);
+ virgl_flush_eq(vctx, vctx, fence);
+
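+ /* the submission no longer needs our in-fence fd; close it and reset
+  * the flags for the next flush */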
+ if (vctx->cbuf->in_fence_fd != -1) {
+ close(vctx->cbuf->in_fence_fd);
+ vctx->cbuf->in_fence_fd = -1;
+ }
+ vctx->cbuf->needs_out_fence_fd = false;
}
static struct pipe_sampler_view *virgl_create_sampler_view(struct pipe_context *ctx,
virgl_encode_set_shader_buffers(vctx, shader, start_slot, count, buffers);
}
+static void virgl_create_fence_fd(struct pipe_context *ctx,
+ struct pipe_fence_handle **fence, int fd)
+{
+ struct virgl_screen *rs = virgl_screen(ctx->screen);
+
+ *fence = rs->vws->cs_create_fence(rs->vws, fd);
+}
+
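+/* the winsys defers the actual wait: the fence is merged into the next
+ * command submission's in-fence */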
+static void virgl_fence_server_sync(struct pipe_context *ctx,
+ struct pipe_fence_handle *fence)
+{
+ struct virgl_context *vctx = virgl_context(ctx);
+ struct virgl_screen *rs = virgl_screen(ctx->screen);
+
+ rs->vws->fence_server_sync(rs->vws, vctx->cbuf, fence);
+}
+
static void virgl_set_shader_images(struct pipe_context *ctx,
enum pipe_shader_type shader,
unsigned start_slot, unsigned count,
vctx->framebuffer.zsbuf = NULL;
vctx->framebuffer.nr_cbufs = 0;
virgl_encoder_destroy_sub_ctx(vctx, vctx->hw_sub_ctx_id);
- virgl_flush_eq(vctx, vctx);
+ virgl_flush_eq(vctx, vctx, NULL);
rs->vws->cmd_buf_destroy(vctx->cbuf);
if (vctx->uploader)
vctx->base.resource_copy_region = virgl_resource_copy_region;
vctx->base.flush_resource = virgl_flush_resource;
vctx->base.blit = virgl_blit;
+ vctx->base.create_fence_fd = virgl_create_fence_fd;
+ vctx->base.fence_server_sync = virgl_fence_server_sync;
vctx->base.set_shader_buffers = virgl_set_shader_buffers;
vctx->base.set_hw_atomic_buffers = virgl_set_hw_atomic_buffers;
case PIPE_CAP_VIDEO_MEMORY:
return 0;
case PIPE_CAP_NATIVE_FENCE_FD:
- return 0;
+ return !!vscreen->vws->supports_fences;
default:
return u_pipe_screen_get_param_defaults(screen, param);
}
return vws->fence_wait(vws, fence, timeout);
}
+static int virgl_fence_get_fd(struct pipe_screen *screen,
+ struct pipe_fence_handle *fence)
+{
+ struct virgl_screen *vscreen = virgl_screen(screen);
+ struct virgl_winsys *vws = vscreen->vws;
+
+ return vws->fence_get_fd(vws, fence);
+}
+
static uint64_t
virgl_get_timestamp(struct pipe_screen *_screen)
{
screen->base.fence_reference = virgl_fence_reference;
//screen->base.fence_signalled = virgl_fence_signalled;
screen->base.fence_finish = virgl_fence_finish;
+ screen->base.fence_get_fd = virgl_fence_get_fd;
virgl_init_screen_resource_functions(&screen->base);
struct virgl_cmd_buf {
unsigned cdw;
uint32_t *buf;
+ int in_fence_fd;
+ bool needs_out_fence_fd;
};
struct virgl_winsys {
unsigned pci_id;
+ int supports_fences; /* In/Out fences are supported */
void (*destroy)(struct virgl_winsys *vws);
void (*cmd_buf_destroy)(struct virgl_cmd_buf *buf);
void (*emit_res)(struct virgl_winsys *vws, struct virgl_cmd_buf *buf, struct virgl_hw_res *res, boolean write_buffer);
- int (*submit_cmd)(struct virgl_winsys *vws, struct virgl_cmd_buf *buf);
+ int (*submit_cmd)(struct virgl_winsys *vws, struct virgl_cmd_buf *buf,
+ int32_t in_fence_fd, int32_t *out_fence_fd);
boolean (*res_is_referenced)(struct virgl_winsys *vws,
struct virgl_cmd_buf *buf,
int (*get_caps)(struct virgl_winsys *vws, struct virgl_drm_caps *caps);
/* fence */
- struct pipe_fence_handle *(*cs_create_fence)(struct virgl_winsys *vws);
+ struct pipe_fence_handle *(*cs_create_fence)(struct virgl_winsys *vws, int fd);
bool (*fence_wait)(struct virgl_winsys *vws,
struct pipe_fence_handle *fence,
uint64_t timeout);
unsigned level, unsigned layer,
void *winsys_drawable_handle,
struct pipe_box *sub_box);
+ void (*fence_server_sync)(struct virgl_winsys *vws,
+ struct virgl_cmd_buf *cbuf,
+ struct pipe_fence_handle *fence);
+
+ int (*fence_get_fd)(struct virgl_winsys *vws,
+ struct pipe_fence_handle *fence);
};
/* this defaults all newer caps,
#include "virgl/virgl_public.h"
#include <xf86drm.h>
+#include <libsync.h>
#include "virtgpu_drm.h"
#include "virgl_drm_winsys.h"
#include "virgl_drm_public.h"
+
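+/* explicit fence fds require virtgpu DRM version 0.1 or later */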
+#define VIRGL_DRM_VERSION(major, minor) ((major) << 16 | (minor))
+#define VIRGL_DRM_VERSION_FENCE_FD VIRGL_DRM_VERSION(0, 1)
+
+
static inline boolean can_cache_resource(struct virgl_hw_res *res)
{
return res->cacheable == TRUE;
if (res->ptr)
os_munmap(res->ptr, res->size);
+ if (res->fence_fd != -1)
+ close(res->fence_fd);
+
memset(&args, 0, sizeof(args));
args.handle = res->bo_handle;
drmIoctl(qdws->fd, DRM_IOCTL_GEM_CLOSE, &args);
res->stride = stride;
pipe_reference_init(&res->reference, 1);
res->num_cs_references = 0;
+ res->fence_fd = -1;
return res;
}
res->stride = info_arg.stride;
pipe_reference_init(&res->reference, 1);
res->num_cs_references = 0;
+ res->fence_fd = -1;
util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)handle, res);
}
cbuf->base.buf = cbuf->buf;
+ cbuf->base.in_fence_fd = -1;
return &cbuf->base;
}
}
static int virgl_drm_winsys_submit_cmd(struct virgl_winsys *qws,
- struct virgl_cmd_buf *_cbuf)
+ struct virgl_cmd_buf *_cbuf,
+ int in_fence_fd, int *out_fence_fd)
{
struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
eb.size = cbuf->base.cdw * 4;
eb.num_bo_handles = cbuf->cres;
eb.bo_handles = (unsigned long)(void *)cbuf->res_hlist;
+ eb.fence_fd = -1;
+
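+ /* ask the kernel to wait on the in-fence before executing the buffer
+  * and/or to return an out-fence that signals on completion */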
+ if (in_fence_fd != -1) {
+ eb.flags |= VIRTGPU_EXECBUF_FENCE_FD_IN;
+ eb.fence_fd = in_fence_fd;
+ }
+
+ if (out_fence_fd != NULL)
+ eb.flags |= VIRTGPU_EXECBUF_FENCE_FD_OUT;
ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
if (ret == -1)
fprintf(stderr,"got error from kernel - expect bad rendering %d\n", errno);
cbuf->base.cdw = 0;
+ if (out_fence_fd != NULL)
+ *out_fence_fd = eb.fence_fd;
+
virgl_drm_release_all_res(qdws, cbuf);
memset(cbuf->is_handle_added, 0, sizeof(cbuf->is_handle_added));
}
static struct pipe_fence_handle *
-virgl_cs_create_fence(struct virgl_winsys *vws)
+virgl_cs_create_fence(struct virgl_winsys *vws, int fd)
{
struct virgl_hw_res *res;
VIRGL_BIND_CUSTOM,
8, 1, 1, 0, 0, 0, 8);
+ res->fence_fd = fd;
return (struct pipe_fence_handle *)res;
}
return TRUE;
}
virgl_drm_resource_wait(vws, res);
+
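+ /* an external fence is backed by a sync file; sync_wait() takes its
+  * timeout in msec while gallium passes nsec */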
+ if (res->fence_fd != -1) {
+ int ret = sync_wait(res->fence_fd, timeout / 1000000);
+ return ret == 0;
+ }
+
return TRUE;
}
virgl_hw_res(src));
}
+static void virgl_fence_server_sync(struct virgl_winsys *vws,
+ struct virgl_cmd_buf *cbuf,
+ struct pipe_fence_handle *fence)
+{
+ struct virgl_hw_res *hw_res = virgl_hw_res(fence);
+
+ /* if not an external fence, then nothing more to do without preemption: */
+ if (hw_res->fence_fd == -1)
+ return;
+
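+ /* merge the fence into the cmd buf's in-fence fd so the next submit waits on it */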
+ sync_accumulate("virgl", &cbuf->in_fence_fd, hw_res->fence_fd);
+}
+
+static int virgl_fence_get_fd(struct virgl_winsys *vws,
+ struct pipe_fence_handle *fence)
+{
+ struct virgl_hw_res *hw_res = virgl_hw_res(fence);
+
+ return dup(hw_res->fence_fd);
+}
+
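+/* virtgpu reports DRM major version 0; encode 0.minor so the result can
+ * be compared against the feature macros above */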
+static int virgl_drm_get_version(int fd)
+{
+ int ret;
+ drmVersionPtr version;
+
+ version = drmGetVersion(fd);
+
+ if (!version)
+ ret = -EFAULT;
+ else if (version->version_major != 0)
+ ret = -EINVAL;
+ else
+ ret = VIRGL_DRM_VERSION(0, version->version_minor);
+
+ drmFreeVersion(version);
+
+ return ret;
+}
static struct virgl_winsys *
virgl_drm_winsys_create(int drmFD)
{
struct virgl_drm_winsys *qdws;
+ int drm_version;
int ret;
int gl = 0;
struct drm_virtgpu_getparam getparam = {0};
if (ret < 0 || !gl)
return NULL;
+ drm_version = virgl_drm_get_version(drmFD);
+ if (drm_version < 0)
+ return NULL;
+
qdws = CALLOC_STRUCT(virgl_drm_winsys);
if (!qdws)
return NULL;
qdws->base.cs_create_fence = virgl_cs_create_fence;
qdws->base.fence_wait = virgl_fence_wait;
qdws->base.fence_reference = virgl_fence_reference;
+ qdws->base.fence_server_sync = virgl_fence_server_sync;
+ qdws->base.fence_get_fd = virgl_fence_get_fd;
+ qdws->base.supports_fences = drm_version >= VIRGL_DRM_VERSION_FENCE_FD;
qdws->base.get_caps = virgl_drm_get_caps;
+
uint32_t value = 0;
getparam.param = VIRTGPU_PARAM_CAPSET_QUERY_FIX;
getparam.value = (uint64_t)(uintptr_t)&value;
int64_t start, end;
boolean flinked;
uint32_t flink;
+ int fence_fd;
};
struct virgl_drm_winsys
{
struct virgl_winsys base;
int fd;
+ int drm_version;
struct list_head delayed;
int num_delayed;
unsigned usecs;
#define DRM_VIRTGPU_WAIT 0x08
#define DRM_VIRTGPU_GET_CAPS 0x09
+/*
+ * virtgpu execbuffer flags
+ */
+#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
+#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
+#define VIRTGPU_EXECBUF_FLAGS (\
+ VIRTGPU_EXECBUF_FENCE_FD_IN |\
+ VIRTGPU_EXECBUF_FENCE_FD_OUT |\
+ 0)
+
struct drm_virtgpu_map {
uint64_t offset; /* use for mmap system call */
uint32_t handle;
uint64_t command; /* void* */
uint64_t bo_handles;
uint32_t num_bo_handles;
- uint32_t pad;
+ int32_t fence_fd;
};
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
#define DRM_IOCTL_VIRTGPU_EXECBUFFER \
- DRM_IOW(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\
struct drm_virtgpu_execbuffer)
#define DRM_IOCTL_VIRTGPU_GETPARAM \
}
static int virgl_vtest_winsys_submit_cmd(struct virgl_winsys *vws,
- struct virgl_cmd_buf *_cbuf)
+ struct virgl_cmd_buf *_cbuf,
+ int in_fence_fd, int *out_fence_fd)
{
struct virgl_vtest_winsys *vtws = virgl_vtest_winsys(vws);
struct virgl_vtest_cmd_buf *cbuf = virgl_vtest_cmd_buf(_cbuf);
if (cbuf->base.cdw == 0)
return 0;
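+ /* the vtest winsys does not advertise fence fd support, so neither fd may be set */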
+ assert(in_fence_fd == -1);
+ assert(out_fence_fd == NULL);
+
ret = virgl_vtest_submit_cmd(vtws, cbuf);
virgl_vtest_release_all_res(vtws, cbuf);
}
static struct pipe_fence_handle *
-virgl_cs_create_fence(struct virgl_winsys *vws)
+virgl_cs_create_fence(struct virgl_winsys *vws, int fd)
{
struct virgl_hw_res *res;
vtws->base.cs_create_fence = virgl_cs_create_fence;
vtws->base.fence_wait = virgl_fence_wait;
vtws->base.fence_reference = virgl_fence_reference;
+ vtws->base.supports_fences = 0;
vtws->base.flush_frontbuffer = virgl_vtest_flush_frontbuffer;
+
return &vtws->base;
}