struct fd_bo *suballoc_bo;
uint32_t suballoc_offset;
simple_mtx_t suballoc_lock;
+
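+ /* Queue used to flush submits to the kernel asynchronously: */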
+ struct util_queue submit_queue;
};
#define foreach_submit(name, list) \
*/
uint32_t last_fence;
+ /**
+ * The last fence seqno that was flushed to the kernel (doesn't mean that it
+ * is complete, just that the kernel knows about it)
+ */
+ uint32_t last_submit_fence;
+
+ uint32_t last_enqueue_fence; /* just for debugging */
+
struct fd_bo *control_mem;
volatile struct fd_pipe_control *control;
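For readers tracking the two counters that just moved onto fd_pipe: a submit's seqno is recorded in last_enqueue_fence when it is handed to the submit queue, and only later in last_submit_fence once the queue worker has flushed it to the kernel, so the latter should always trail the former. A minimal illustrative helper stating the invariant implied by the asserts further down in this patch (not part of the patch itself; the helper name is made up):

static inline void
assert_pipe_fence_order(const struct fd_pipe *pipe)
{
   /* What the kernel has been told about can never be ahead of what has
    * been handed to the submit queue (wraparound-safe comparison):
    */
   assert(!fd_fence_after(pipe->last_submit_fence, pipe->last_enqueue_fence));
}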
msm_device_destroy(struct fd_device *dev)
{
struct msm_device *msm_dev = to_msm_device(dev);
- if (util_queue_is_initialized(&msm_dev->submit_queue)) {
- util_queue_destroy(&msm_dev->submit_queue);
- }
free(msm_dev);
}
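The matching util_queue_destroy() disappears from the msm backend here; since the queue now lives in the base struct, the core fd_device teardown presumably takes over that cleanup in a hunk not shown in this excerpt. A hedged sketch of what that common cleanup could look like, assuming it runs once per device regardless of backend (the function name below is made up):

static void
fd_device_cleanup_submit_queue(struct fd_device *dev)
{
   /* The queue is not initialized unconditionally (the removed code guarded
    * on util_queue_is_initialized() for the same reason), so keep the guard:
    */
   if (util_queue_is_initialized(&dev->submit_queue))
      util_queue_destroy(&dev->submit_queue);
}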
* thread's comm truncating the interesting part of the
* process name.
*/
- util_queue_init(&msm_dev->submit_queue, "sq", 8, 1, 0, NULL);
+ util_queue_init(&dev->submit_queue, "sq", 8, 1, 0, NULL);
}
if (version->version_minor >= FD_VERSION_CACHED_COHERENT) {
struct msm_device {
struct fd_device base;
- struct util_queue submit_queue;
};
FD_DEFINE_CAST(fd_device, msm_device);
struct slab_parent_pool ring_pool;
/**
- * The last fence seqno that was flushed to kernel (doesn't mean that it
- * is complete, just that the kernel knows about it)
- */
- uint32_t last_submit_fence;
-
- uint32_t last_enqueue_fence; /* just for debugging */
-
- /**
* If we *ever* see an in-fence-fd, assume that userspace is
* not relying on implicit fences.
*/
free(submit_bos);
pthread_mutex_lock(&flush_mtx);
- assert(fd_fence_before(msm_pipe->last_submit_fence, msm_submit->base.fence));
- msm_pipe->last_submit_fence = msm_submit->base.fence;
+ assert(fd_fence_before(msm_pipe->base.last_submit_fence, msm_submit->base.fence));
+ msm_pipe->base.last_submit_fence = msm_submit->base.fence;
pthread_cond_broadcast(&flush_cnd);
pthread_mutex_unlock(&flush_mtx);
{
struct fd_submit *submit = last_submit(submit_list);
struct msm_submit_sp *msm_submit = to_msm_submit_sp(submit);
- struct msm_device *msm_dev = to_msm_device(submit->pipe->dev);
list_replace(submit_list, &msm_submit->submit_list);
list_inithead(submit_list);
DEBUG_MSG("enqueue: %u", submit->fence);
- util_queue_add_job(&msm_dev->submit_queue,
+ util_queue_add_job(&submit->pipe->dev->submit_queue,
submit, fence,
msm_submit_sp_flush_execute,
msm_submit_sp_flush_cleanup,
struct fd_submit_fence *out_fence)
{
struct fd_device *dev = submit->pipe->dev;
- struct msm_pipe *msm_pipe = to_msm_pipe(submit->pipe);
+ struct fd_pipe *pipe = submit->pipe;
/* Acquire lock before flush_prep() because it is possible to race between
* this and pipe->flush():
bool has_shared = msm_submit_sp_flush_prep(submit, in_fence_fd, out_fence);
- assert(fd_fence_before(msm_pipe->last_enqueue_fence, submit->fence));
- msm_pipe->last_enqueue_fence = submit->fence;
+ assert(fd_fence_before(pipe->last_enqueue_fence, submit->fence));
+ pipe->last_enqueue_fence = submit->fence;
/* If we don't need an out-fence, we can defer the submit.
*
void
msm_pipe_sp_flush(struct fd_pipe *pipe, uint32_t fence)
{
- struct msm_pipe *msm_pipe = to_msm_pipe(pipe);
struct fd_device *dev = pipe->dev;
struct list_head submit_list;
simple_mtx_lock(&dev->submit_lock);
- assert(!fd_fence_after(fence, msm_pipe->last_enqueue_fence));
+ assert(!fd_fence_after(fence, pipe->last_enqueue_fence));
foreach_submit_safe (deferred_submit, &dev->deferred_submits) {
/* We should never have submits from multiple pipes in the deferred
* them to the kernel
*/
pthread_mutex_lock(&flush_mtx);
- while (fd_fence_before(msm_pipe->last_submit_fence, fence)) {
+ while (fd_fence_before(pipe->last_submit_fence, fence)) {
pthread_cond_wait(&flush_cnd, &flush_mtx);
}
pthread_mutex_unlock(&flush_mtx);