* For now they can stay, but will likely change/move before final:
*/
unsigned bmSetFence( struct intel_context * );
+unsigned bmSetFenceLock( struct intel_context * );
unsigned bmLockAndFence( struct intel_context *intel );
int bmTestFence( struct intel_context *, unsigned fence );
void bmFinishFence( struct intel_context *, unsigned fence );
+void bmFinishFenceLock( struct intel_context *, unsigned fence );
void bm_fake_NotifyContendedLockTake( struct intel_context * );
}
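The new *Lock variants wrap their unlocked counterparts in the bufmgr mutex, so callers that do not already hold it can fence safely. For orientation only, here is a minimal sketch of what LOCK/UNLOCK could expand to, assuming the bufmgr embeds a plain pthread mutex; the real macros live in the bufmgr implementation and may use a different threading wrapper:

/* Hypothetical sketch -- not part of this patch.  Assumes struct bufmgr
 * has a pthread_mutex_t member named mutex.
 */
#include <pthread.h>

#define LOCK(bm)    pthread_mutex_lock(&(bm)->mutex)
#define UNLOCK(bm)  pthread_mutex_unlock(&(bm)->mutex)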
-
static int check_fenced( struct intel_context *intel )
{
   struct bufmgr *bm = intel->bm;

   return bm->last_fence;
}
+/* As bmSetFence, but take the bufmgr mutex internally. */
+unsigned bmSetFenceLock( struct intel_context *intel )
+{
+   unsigned fence;
+
+   LOCK(intel->bm);
+   fence = bmSetFence(intel);
+   UNLOCK(intel->bm);
+
+   return fence;
+}
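A usage sketch, mirroring the swap-buffer hunk further down: the caller still takes the hardware lock itself, while bmSetFenceLock handles the bufmgr mutex. The surrounding statements are illustrative only:

/* Hypothetical call site -- names are taken from this patch. */
LOCK_HARDWARE( intel );
intel_batchbuffer_flush( intel->batch );
intel->last_swap_fence = bmSetFenceLock( intel );
UNLOCK_HARDWARE( intel );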
unsigned bmLockAndFence( struct intel_context *intel )
{
   if (intel->bm->need_fence) {
      LOCK_HARDWARE(intel);
+      LOCK(intel->bm);
      bmSetFence(intel);
+      UNLOCK(intel->bm);
      UNLOCK_HARDWARE(intel);
   }

   return check_fenced(intel);
}
-
+/* As bmFinishFence, but take the bufmgr mutex internally. */
+void bmFinishFenceLock( struct intel_context *intel, unsigned fence )
+{
+   LOCK(intel->bm);
+   bmFinishFence(intel, fence);
+   UNLOCK(intel->bm);
+}
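These wrappers keep the lock order consistent: where both are needed, the hardware lock is taken outside the bufmgr mutex. The hunks below convert the swap path to the wrappers; for a caller that holds no hardware lock at all, a minimal sketch of a throttle on an old swap fence, where the helper function itself is hypothetical but the fence fields are the ones this patch already uses:

/* Hypothetical throttle helper -- not part of this patch. */
static void intel_throttle_on_old_swap( struct intel_context *intel )
{
   if (intel->second_last_swap_fence)
      bmFinishFenceLock( intel, intel->second_last_swap_fence );
}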
/* Specifically ignore texture memory sharing. */
intelFlush( &intel->ctx );
- bmFinishFence(intel, intel->last_swap_fence);
+ bmFinishFenceLock(intel, intel->last_swap_fence);
/* The LOCK_HARDWARE is required for the cliprects.  Buffer offsets
 * should work regardless.
 */
intel_batchbuffer_flush( intel->batch );
intel->second_last_swap_fence = intel->last_swap_fence;
- intel->last_swap_fence = bmSetFence( intel );
+ intel->last_swap_fence = bmSetFenceLock( intel );
UNLOCK_HARDWARE( intel );
if (!rect)