* for possible GL errors.
*/
if (!user_buffer_mask || count <= 0 || instance_count <= 0 ||
- ctx->GLThread.draw_always_async) {
+ /* This will just generate GL_INVALID_OPERATION, as it should. */
+ ctx->GLThread.inside_begin_end ||
+ ctx->CurrentServerDispatch == ctx->ContextLost ||
+ ctx->GLThread.ListMode) {
if (instance_count == 1 && baseinstance == 0 && drawid == 0) {
int cmd_size = sizeof(struct marshal_cmd_DrawArrays);
struct marshal_cmd_DrawArrays *cmd =
struct glthread_attrib_binding buffers[VERT_ATTRIB_MAX];
unsigned user_buffer_mask =
ctx->API == API_OPENGL_CORE || draw_count <= 0 ||
- ctx->GLThread.draw_always_async ? 0 : get_user_buffer_mask(ctx);
+ ctx->CurrentServerDispatch == ctx->ContextLost ||
+ ctx->GLThread.inside_begin_end ? 0 : get_user_buffer_mask(ctx);
if (user_buffer_mask) {
unsigned min_index = ~0;
* This is also an error path. Zero counts should still call the driver
* for possible GL errors.
*/
- if (ctx->GLThread.draw_always_async || count <= 0 || instance_count <= 0 ||
+ if (count <= 0 || instance_count <= 0 ||
!is_index_type_valid(type) ||
- (!user_buffer_mask && !has_user_indices)) {
+ (!user_buffer_mask && !has_user_indices) ||
+ ctx->CurrentServerDispatch == ctx->ContextLost ||
+ /* This will just generate GL_INVALID_OPERATION, as it should. */
+ ctx->GLThread.inside_begin_end ||
+ ctx->GLThread.ListMode) {
if (instance_count == 1 && baseinstance == 0 && drawid == 0) {
int cmd_size = sizeof(struct marshal_cmd_DrawElementsBaseVertex);
struct marshal_cmd_DrawElementsBaseVertex *cmd =
* a GL error, we don't upload anything.
*/
if (draw_count > 0 && is_index_type_valid(type) &&
- !ctx->GLThread.draw_always_async) {
+ ctx->CurrentServerDispatch != ctx->ContextLost &&
+ !ctx->GLThread.inside_begin_end) {
user_buffer_mask = ctx->API == API_OPENGL_CORE ? 0 : get_user_buffer_mask(ctx);
has_user_indices = vao->CurrentElementBufferName == 0;
}
unmap_draw_indirect_params(ctx);
}
+/* Return true when an indirect draw may be marshalled asynchronously.
+ * Callers below use this to take the fast path that just records a
+ * marshal_cmd_* packet instead of inspecting user vertex buffers first.
+ * True also covers the error paths (inside Begin/End, display-list mode,
+ * lost context), where executing the call will simply raise the proper
+ * GL error on the server thread.
+ */
+static inline bool
+draw_indirect_async_allowed(struct gl_context *ctx, unsigned user_buffer_mask)
+{
+   return ctx->API != API_OPENGL_COMPAT ||
+          /* This will just generate GL_INVALID_OPERATION, as it should. */
+          ctx->GLThread.inside_begin_end ||
+          ctx->GLThread.ListMode ||
+          ctx->CurrentServerDispatch == ctx->ContextLost ||
+          /* If the DrawIndirect buffer is bound, it behaves like profile != compat
+           * if there are no user VBOs. */
+          (ctx->GLThread.CurrentDrawIndirectBufferName && !user_buffer_mask);
+}
+
struct marshal_cmd_DrawArraysIndirect
{
struct marshal_cmd_base cmd_base;
unsigned user_buffer_mask =
_mesa_is_gles31(ctx) ? 0 : vao->UserPointerMask & vao->BufferEnabled;
- if (ctx->GLThread.draw_always_async ||
- !ctx->GLThread.CurrentDrawIndirectBufferName ||
- !user_buffer_mask) {
+ if (draw_indirect_async_allowed(ctx, user_buffer_mask)) {
int cmd_size = sizeof(struct marshal_cmd_DrawArraysIndirect);
struct marshal_cmd_DrawArraysIndirect *cmd;
unsigned user_buffer_mask =
_mesa_is_gles31(ctx) ? 0 : vao->UserPointerMask & vao->BufferEnabled;
- if (ctx->GLThread.draw_always_async || !is_index_type_valid(type) ||
- !ctx->GLThread.CurrentDrawIndirectBufferName ||
- !vao->CurrentElementBufferName || !user_buffer_mask) {
+ if (draw_indirect_async_allowed(ctx, user_buffer_mask) ||
+ !is_index_type_valid(type)) {
int cmd_size = sizeof(struct marshal_cmd_DrawElementsIndirect);
struct marshal_cmd_DrawElementsIndirect *cmd;
unsigned user_buffer_mask =
_mesa_is_gles31(ctx) ? 0 : vao->UserPointerMask & vao->BufferEnabled;
- if (ctx->GLThread.draw_always_async ||
- !ctx->GLThread.CurrentDrawIndirectBufferName ||
- !user_buffer_mask) {
+ if (draw_indirect_async_allowed(ctx, user_buffer_mask) ||
+ primcount <= 0) {
int cmd_size = sizeof(struct marshal_cmd_MultiDrawArraysIndirect);
struct marshal_cmd_MultiDrawArraysIndirect *cmd;
unsigned user_buffer_mask =
_mesa_is_gles31(ctx) ? 0 : vao->UserPointerMask & vao->BufferEnabled;
- if (ctx->GLThread.draw_always_async || !is_index_type_valid(type) ||
- !ctx->GLThread.CurrentDrawIndirectBufferName ||
- !vao->CurrentElementBufferName || !user_buffer_mask) {
+ if (draw_indirect_async_allowed(ctx, user_buffer_mask) ||
+ primcount <= 0 ||
+ !is_index_type_valid(type)) {
int cmd_size = sizeof(struct marshal_cmd_MultiDrawElementsIndirect);
struct marshal_cmd_MultiDrawElementsIndirect *cmd;
unsigned user_buffer_mask =
_mesa_is_gles31(ctx) ? 0 : vao->UserPointerMask & vao->BufferEnabled;
- if (ctx->GLThread.draw_always_async || !user_buffer_mask ||
+ if (draw_indirect_async_allowed(ctx, user_buffer_mask) ||
+ /* This will just generate GL_INVALID_OPERATION because Draw*IndirectCount
+ * functions forbid a user indirect buffer in the Compat profile. */
!ctx->GLThread.CurrentDrawIndirectBufferName) {
int cmd_size =
sizeof(struct marshal_cmd_MultiDrawArraysIndirectCountARB);
unsigned user_buffer_mask =
_mesa_is_gles31(ctx) ? 0 : vao->UserPointerMask & vao->BufferEnabled;
- if (ctx->GLThread.draw_always_async || !user_buffer_mask ||
+ if (draw_indirect_async_allowed(ctx, user_buffer_mask) ||
+ /* This will just generate GL_INVALID_OPERATION because Draw*IndirectCount
+ * functions forbid a user indirect buffer in the Compat profile. */
!ctx->GLThread.CurrentDrawIndirectBufferName ||
!is_index_type_valid(type)) {
int cmd_size = sizeof(struct marshal_cmd_MultiDrawElementsIndirectCountARB);
return ctx->GLThread.CurrentPixelUnpackBufferName == 0;
}
-static inline void
-_mesa_glthread_update_draw_always_async(struct gl_context *ctx)
-{
- /* Executing erroneous cases will just generate GL_INVALID_OPERATION. */
- ctx->GLThread.draw_always_async =
- ctx->API == API_OPENGL_CORE ||
- ctx->CurrentServerDispatch == ctx->ContextLost ||
- ctx->GLThread.inside_begin_end ||
- ctx->GLThread.ListMode;
-}
-
static inline unsigned
_mesa_buffer_enum_to_count(GLenum buffer)
{
static inline void
_mesa_glthread_NewList(struct gl_context *ctx, GLuint list, GLenum mode)
{
+   /* Record the compile mode of the display list being started; only the
+    * first (outermost) NewList sets it -- presumably redundant calls while
+    * already in ListMode must not overwrite it (confirm against callers).
+    * The mode is clamped with MIN2 to 0xffff -- NOTE(review): looks like
+    * ListMode is a 16-bit field; verify its declaration.
+    */
-   if (!ctx->GLThread.ListMode) {
+   if (!ctx->GLThread.ListMode)
       ctx->GLThread.ListMode = MIN2(mode, 0xffff);
-      _mesa_glthread_update_draw_always_async(ctx);
-   }
}
static inline void
return;
ctx->GLThread.ListMode = 0;
- _mesa_glthread_update_draw_always_async(ctx);
/* Track the last display list change. */
p_atomic_set(&ctx->GLThread.LastDListChangeBatchIndex, ctx->GLThread.next);
}
}
-static inline void
-_mesa_glthread_Begin(struct gl_context *ctx)
-{
- ctx->GLThread.inside_begin_end = true;
- _mesa_glthread_update_draw_always_async(ctx);
-}
-
-static inline void
-_mesa_glthread_End(struct gl_context *ctx)
-{
- ctx->GLThread.inside_begin_end = false;
- _mesa_glthread_update_draw_always_async(ctx);
-}
-
#endif /* MARSHAL_H */