#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_BUFFER_SYNC)
static struct workqueue_struct *gpFenceStatusWq;
+static struct workqueue_struct *gpFenceUnorderedWq;
static PVRSRV_ERROR _NativeSyncInit(void)
{
return PVRSRV_ERROR_INIT_FAILURE;
}
+ /* Create the dedicated queue for unordered fence work. On failure,
+  * unwind the already-created status workqueue and NULL it so init
+  * leaves no partial state behind (assumes gpFenceStatusWq was created
+  * just above in the elided context — TODO confirm). */
+ gpFenceUnorderedWq = create_workqueue("pvr_fence_unordered");
+ if (!gpFenceUnorderedWq)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create fence unordered workqueue",
+ __func__));
+ destroy_workqueue(gpFenceStatusWq);
+ gpFenceStatusWq = NULL;
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+
return PVRSRV_OK;
}
static void _NativeSyncDeinit(void)
{
+ /* Tear down in reverse creation order. NOTE(review): destroy_workqueue()
+  * must not be passed NULL — this assumes Deinit only runs after a
+  * successful _NativeSyncInit(), which NULLs nothing on success. Confirm
+  * no caller reaches here on the failed-init path. */
+ destroy_workqueue(gpFenceUnorderedWq);
	destroy_workqueue(gpFenceStatusWq);
}
return gpFenceStatusWq;
}
+
+/*************************************************************************/ /*!
+ NativeSyncGetFenceUnorderedWq
+
+ Returns the workqueue used for unordered fence work, or NULL when the
+ queue was never created (i.e. _NativeSyncInit() failed or has not run).
+ DEBUG builds additionally trip PVR_ASSERT so the missing-queue case is
+ caught early; release builds just return NULL to the caller.
+*/ /**************************************************************************/
+struct workqueue_struct *NativeSyncGetFenceUnorderedWq(void)
+{
+ if (!gpFenceUnorderedWq)
+ {
+#if defined(DEBUG)
+ PVR_ASSERT(gpFenceUnorderedWq);
+#endif
+ return NULL;
+ }
+
+ return gpFenceUnorderedWq;
+}
#endif
PVRSRV_ERROR OSInitEnvData(void)
{
struct pvr_fence_context *fctx =
container_of(kref, struct pvr_fence_context, kref);
+ struct workqueue_struct *unordered_wq = NativeSyncGetFenceUnorderedWq();
PVR_FENCE_CTX_TRACE(fctx, "destroyed fence context (%s)\n", fctx->name);
trace_pvr_fence_context_destroy_kref(fctx);
- schedule_work(&fctx->destroy_work);
+ if (unordered_wq)
+ queue_work(unordered_wq, &fctx->destroy_work);
}
/**
static inline void pvr_fence_cleanup(void)
{
+ struct workqueue_struct *unordered_wq = NativeSyncGetFenceUnorderedWq();
+
/*
* Ensure all PVR fence contexts have been destroyed, by flushing
* the global workqueue.
*/
- flush_scheduled_work();
+ if (unordered_wq)
+ flush_workqueue(unordered_wq);
}
#if defined(PVR_FENCE_DEBUG)
@Return struct workqueue_struct ptr on success, NULL otherwise.
*/ /**************************************************************************/
struct workqueue_struct *NativeSyncGetFenceStatusWq(void);
+struct workqueue_struct *NativeSyncGetFenceUnorderedWq(void);
#endif
#endif /* __SERVICES_KERNEL_CLIENT__ */