51ae9cc7bf47e714983d5f117be2acb4fc1a0f65
[profile/ivi/mesa.git] / src / mesa / drivers / dri / radeon / radeon_common.c
1 /**************************************************************************
2
3 Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
4
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28
29 **************************************************************************/
30
31 /*
32  * Authors:
33  *   Keith Whitwell <keith@tungstengraphics.com>
34  */
35
36 /*
37    - Scissor implementation
38    - buffer swap/copy ioctls
39    - finish/flush
40    - state emission
41    - cmdbuffer management
42 */
43
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/imports.h"
47 #include "main/context.h"
48 #include "main/enums.h"
49 #include "main/framebuffer.h"
50 #include "main/renderbuffer.h"
51 #include "drivers/common/meta.h"
52
53 #include "vblank.h"
54
55 #include "radeon_common.h"
56 #include "radeon_bocs_wrapper.h"
57 #include "radeon_lock.h"
58 #include "radeon_drm.h"
59 #include "radeon_queryobj.h"
60
61 /**
62  * Enable verbose debug output for emit code.
63  * 0 no output
64  * 1 most output
65  * 2 also print state values
66  */
67 #define RADEON_CMDBUF         0
68
69 /* =============================================================
70  * Scissoring
71  */
72
73 static GLboolean intersect_rect(drm_clip_rect_t * out,
74                                 drm_clip_rect_t * a, drm_clip_rect_t * b)
75 {
76         *out = *a;
77         if (b->x1 > out->x1)
78                 out->x1 = b->x1;
79         if (b->y1 > out->y1)
80                 out->y1 = b->y1;
81         if (b->x2 < out->x2)
82                 out->x2 = b->x2;
83         if (b->y2 < out->y2)
84                 out->y2 = b->y2;
85         if (out->x1 >= out->x2)
86                 return GL_FALSE;
87         if (out->y1 >= out->y2)
88                 return GL_FALSE;
89         return GL_TRUE;
90 }
91
92 void radeonRecalcScissorRects(radeonContextPtr radeon)
93 {
94         drm_clip_rect_t *out;
95         int i;
96
97         /* Grow cliprect store?
98          */
99         if (radeon->state.scissor.numAllocedClipRects < radeon->numClipRects) {
100                 while (radeon->state.scissor.numAllocedClipRects <
101                        radeon->numClipRects) {
102                         radeon->state.scissor.numAllocedClipRects += 1; /* zero case */
103                         radeon->state.scissor.numAllocedClipRects *= 2;
104                 }
105
106                 if (radeon->state.scissor.pClipRects)
107                         FREE(radeon->state.scissor.pClipRects);
108
109                 radeon->state.scissor.pClipRects =
110                         MALLOC(radeon->state.scissor.numAllocedClipRects *
111                                sizeof(drm_clip_rect_t));
112
113                 if (radeon->state.scissor.pClipRects == NULL) {
114                         radeon->state.scissor.numAllocedClipRects = 0;
115                         return;
116                 }
117         }
118
119         out = radeon->state.scissor.pClipRects;
120         radeon->state.scissor.numClipRects = 0;
121
122         for (i = 0; i < radeon->numClipRects; i++) {
123                 if (intersect_rect(out,
124                                    &radeon->pClipRects[i],
125                                    &radeon->state.scissor.rect)) {
126                         radeon->state.scissor.numClipRects++;
127                         out++;
128                 }
129         }
130
131         if (radeon->vtbl.update_scissor)
132            radeon->vtbl.update_scissor(radeon->glCtx);
133 }
134
135 void radeon_get_cliprects(radeonContextPtr radeon,
136                           struct drm_clip_rect **cliprects,
137                           unsigned int *num_cliprects,
138                           int *x_off, int *y_off)
139 {
140         __DRIdrawable *dPriv = radeon_get_drawable(radeon);
141         struct radeon_framebuffer *rfb = dPriv->driverPrivate;
142
143         if (radeon->constant_cliprect) {
144                 radeon->fboRect.x1 = 0;
145                 radeon->fboRect.y1 = 0;
146                 radeon->fboRect.x2 = radeon->glCtx->DrawBuffer->Width;
147                 radeon->fboRect.y2 = radeon->glCtx->DrawBuffer->Height;
148
149                 *cliprects = &radeon->fboRect;
150                 *num_cliprects = 1;
151                 *x_off = 0;
152                 *y_off = 0;
153         } else if (radeon->front_cliprects ||
154                    rfb->pf_active || dPriv->numBackClipRects == 0) {
155                 *cliprects = dPriv->pClipRects;
156                 *num_cliprects = dPriv->numClipRects;
157                 *x_off = dPriv->x;
158                 *y_off = dPriv->y;
159         } else {
160                 *num_cliprects = dPriv->numBackClipRects;
161                 *cliprects = dPriv->pBackClipRects;
162                 *x_off = dPriv->backX;
163                 *y_off = dPriv->backY;
164         }
165 }
166
167 /**
168  * Update cliprects and scissors.
169  */
170 void radeonSetCliprects(radeonContextPtr radeon)
171 {
172         __DRIdrawable *const drawable = radeon_get_drawable(radeon);
173         __DRIdrawable *const readable = radeon_get_readable(radeon);
174
175         if(drawable == NULL && readable == NULL)
176                 return;
177
178         struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
179         struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
180         int x_off, y_off;
181
182         radeon_get_cliprects(radeon, &radeon->pClipRects,
183                              &radeon->numClipRects, &x_off, &y_off);
184
185         if ((draw_rfb->base.Width != drawable->w) ||
186             (draw_rfb->base.Height != drawable->h)) {
187                 _mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
188                                          drawable->w, drawable->h);
189                 draw_rfb->base.Initialized = GL_TRUE;
190         }
191
192         if (drawable != readable) {
193                 if ((read_rfb->base.Width != readable->w) ||
194                     (read_rfb->base.Height != readable->h)) {
195                         _mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
196                                                  readable->w, readable->h);
197                         read_rfb->base.Initialized = GL_TRUE;
198                 }
199         }
200
201         if (radeon->state.scissor.enabled)
202                 radeonRecalcScissorRects(radeon);
203
204 }
205
206
207
208 void radeonUpdateScissor( struct gl_context *ctx )
209 {
210         radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
211         GLint x = ctx->Scissor.X, y = ctx->Scissor.Y;
212         GLsizei w = ctx->Scissor.Width, h = ctx->Scissor.Height;
213         int x1, y1, x2, y2;
214         int min_x, min_y, max_x, max_y;
215
216         if (!ctx->DrawBuffer)
217             return;
218         min_x = min_y = 0;
219         max_x = ctx->DrawBuffer->Width - 1;
220         max_y = ctx->DrawBuffer->Height - 1;
221
222         if ( !ctx->DrawBuffer->Name ) {
223                 x1 = x;
224                 y1 = ctx->DrawBuffer->Height - (y + h);
225                 x2 = x + w - 1;
226                 y2 = y1 + h - 1;
227         } else {
228                 x1 = x;
229                 y1 = y;
230                 x2 = x + w - 1;
231                 y2 = y + h - 1;
232
233         }
234
235         rmesa->state.scissor.rect.x1 = CLAMP(x1,  min_x, max_x);
236         rmesa->state.scissor.rect.y1 = CLAMP(y1,  min_y, max_y);
237         rmesa->state.scissor.rect.x2 = CLAMP(x2,  min_x, max_x);
238         rmesa->state.scissor.rect.y2 = CLAMP(y2,  min_y, max_y);
239
240         radeonRecalcScissorRects( rmesa );
241 }
242
243 /* =============================================================
244  * Scissoring
245  */
246
247 void radeonScissor(struct gl_context* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
248 {
249         radeonContextPtr radeon = RADEON_CONTEXT(ctx);
250         if (ctx->Scissor.Enabled) {
251                 /* We don't pipeline cliprect changes */
252                 radeon_firevertices(radeon);
253                 radeonUpdateScissor(ctx);
254         }
255 }
256
257 /* ================================================================
258  * SwapBuffers with client-side throttling
259  */
260
261 uint32_t radeonGetAge(radeonContextPtr radeon)
262 {
263         drm_radeon_getparam_t gp;
264         int ret;
265         uint32_t age;
266
267         gp.param = RADEON_PARAM_LAST_CLEAR;
268         gp.value = (int *)&age;
269         ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
270                                   &gp, sizeof(gp));
271         if (ret) {
272                 fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
273                         ret);
274                 exit(1);
275         }
276
277         return age;
278 }
279
280
281 /* wait for idle */
282 void radeonWaitForIdleLocked(radeonContextPtr radeon)
283 {
284         int ret;
285         int i = 0;
286
287         do {
288                 ret = drmCommandNone(radeon->dri.fd, DRM_RADEON_CP_IDLE);
289                 if (ret)
290                         DO_USLEEP(1);
291         } while (ret && ++i < 100);
292
293         if (ret < 0) {
294                 UNLOCK_HARDWARE(radeon);
295                 fprintf(stderr, "Error: R300 timed out... exiting\n");
296                 exit(-1);
297         }
298 }
299
/* Swap the front/back renderbuffer attachments after a page flip so the
 * GL-visible attachments track the hardware's current/next page.  Every
 * pointer move goes through _mesa_reference_renderbuffer so reference
 * counts stay balanced.
 */
static void radeon_flip_renderbuffers(struct radeon_framebuffer *rfb)
{
        int current_page = rfb->pf_current_page;
        int next_page = (current_page + 1) % rfb->pf_num_pages;
        struct gl_renderbuffer *tmp_rb;

        /* Exchange renderbuffers if necessary but make sure their
         * reference counts are preserved.
         */
        if (rfb->color_rb[current_page] &&
            rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer !=
            &rfb->color_rb[current_page]->base) {
                /* Hold a temporary reference on the old front attachment,
                 * install the current page as front, then drop the temp
                 * reference (possibly freeing the old attachment). */
                tmp_rb = NULL;
                _mesa_reference_renderbuffer(&tmp_rb,
                                             rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
                tmp_rb = &rfb->color_rb[current_page]->base;
                _mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer, tmp_rb);
                _mesa_reference_renderbuffer(&tmp_rb, NULL);
        }

        /* Same dance for the back attachment with the next page. */
        if (rfb->color_rb[next_page] &&
            rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer !=
            &rfb->color_rb[next_page]->base) {
                tmp_rb = NULL;
                _mesa_reference_renderbuffer(&tmp_rb,
                                             rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer);
                tmp_rb = &rfb->color_rb[next_page]->base;
                _mesa_reference_renderbuffer(&rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer, tmp_rb);
                _mesa_reference_renderbuffer(&tmp_rb, NULL);
        }
}
331
332 /**
333  * Check if we're about to draw into the front color buffer.
334  * If so, set the intel->front_buffer_dirty field to true.
335  */
336 void
337 radeon_check_front_buffer_rendering(struct gl_context *ctx)
338 {
339         radeonContextPtr radeon = RADEON_CONTEXT(ctx);
340         const struct gl_framebuffer *fb = ctx->DrawBuffer;
341
342         if (fb->Name == 0) {
343                 /* drawing to window system buffer */
344                 if (fb->_NumColorDrawBuffers > 0) {
345                         if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
346                                 radeon->front_buffer_dirty = GL_TRUE;
347                         }
348                 }
349         }
350 }
351
352
353 void radeon_draw_buffer(struct gl_context *ctx, struct gl_framebuffer *fb)
354 {
355         radeonContextPtr radeon = RADEON_CONTEXT(ctx);
356         struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
357                 *rrbColor = NULL;
358         uint32_t offset = 0;
359
360
361         if (!fb) {
362                 /* this can happen during the initial context initialization */
363                 return;
364         }
365
366         /* radeons only handle 1 color draw so far */
367         if (fb->_NumColorDrawBuffers != 1) {
368                 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
369                 return;
370         }
371
372         /* Do this here, note core Mesa, since this function is called from
373          * many places within the driver.
374          */
375         if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
376                 /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
377                 _mesa_update_framebuffer(ctx);
378                 /* this updates the DrawBuffer's Width/Height if it's a FBO */
379                 _mesa_update_draw_buffer_bounds(ctx);
380         }
381
382         if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
383                 /* this may occur when we're called by glBindFrameBuffer() during
384                  * the process of someone setting up renderbuffers, etc.
385                  */
386                 /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
387                 return;
388         }
389
390         if (fb->Name)
391                 ;/* do something depthy/stencily TODO */
392
393
394                 /* none */
395         if (fb->Name == 0) {
396                 if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
397                         rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
398                         radeon->front_cliprects = GL_TRUE;
399                 } else {
400                         rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
401                         radeon->front_cliprects = GL_FALSE;
402                 }
403         } else {
404                 /* user FBO in theory */
405                 struct radeon_renderbuffer *rrb;
406                 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
407                 if (rrb) {
408                         offset = rrb->draw_offset;
409                         rrbColor = rrb;
410                 }
411                 radeon->constant_cliprect = GL_TRUE;
412         }
413
414         if (rrbColor == NULL)
415                 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
416         else
417                 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);
418
419
420         if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
421                 rrbDepth = radeon_renderbuffer(fb->_DepthBuffer->Wrapped);
422                 if (rrbDepth && rrbDepth->bo) {
423                         radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
424                 } else {
425                         radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
426                 }
427         } else {
428                 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
429                 rrbDepth = NULL;
430         }
431
432         if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
433                 rrbStencil = radeon_renderbuffer(fb->_StencilBuffer->Wrapped);
434                 if (rrbStencil && rrbStencil->bo) {
435                         radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
436                         /* need to re-compute stencil hw state */
437                         if (!rrbDepth)
438                                 rrbDepth = rrbStencil;
439                 } else {
440                         radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
441                 }
442         } else {
443                 radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
444                 if (ctx->Driver.Enable != NULL)
445                         ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
446                 else
447                         ctx->NewState |= _NEW_STENCIL;
448         }
449
450         /* Update culling direction which changes depending on the
451          * orientation of the buffer:
452          */
453         if (ctx->Driver.FrontFace)
454                 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
455         else
456                 ctx->NewState |= _NEW_POLYGON;
457
458         /*
459          * Update depth test state
460          */
461         if (ctx->Driver.Enable) {
462                 ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
463                                    (ctx->Depth.Test && fb->Visual.depthBits > 0));
464                 /* Need to update the derived ctx->Stencil._Enabled first */
465                 ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
466                                    (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
467         } else {
468                 ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
469         }
470
471         _mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base);
472         _mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base);
473         radeon->state.color.draw_offset = offset;
474
475 #if 0
476         /* update viewport since it depends on window size */
477         if (ctx->Driver.Viewport) {
478                 ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
479                                      ctx->Viewport.Width, ctx->Viewport.Height);
480         } else {
481
482         }
483 #endif
484         ctx->NewState |= _NEW_VIEWPORT;
485
486         /* Set state we know depends on drawable parameters:
487          */
488         radeonUpdateScissor(ctx);
489         radeon->NewGLState |= _NEW_SCISSOR;
490
491         if (ctx->Driver.DepthRange)
492                 ctx->Driver.DepthRange(ctx,
493                                        ctx->Viewport.Near,
494                                        ctx->Viewport.Far);
495
496         /* Update culling direction which changes depending on the
497          * orientation of the buffer:
498          */
499         if (ctx->Driver.FrontFace)
500                 ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
501         else
502                 ctx->NewState |= _NEW_POLYGON;
503 }
504
505 /**
506  * Called via glDrawBuffer.
507  */
508 void radeonDrawBuffer( struct gl_context *ctx, GLenum mode )
509 {
510         if (RADEON_DEBUG & RADEON_DRI)
511                 fprintf(stderr, "%s %s\n", __FUNCTION__,
512                         _mesa_lookup_enum_by_nr( mode ));
513
514         if (ctx->DrawBuffer->Name == 0) {
515                 radeonContextPtr radeon = RADEON_CONTEXT(ctx);
516
517                 const GLboolean was_front_buffer_rendering =
518                         radeon->is_front_buffer_rendering;
519
520                 radeon->is_front_buffer_rendering = (mode == GL_FRONT_LEFT) ||
521                                             (mode == GL_FRONT);
522
523       /* If we weren't front-buffer rendering before but we are now, make sure
524        * that the front-buffer has actually been allocated.
525        */
526                 if (!was_front_buffer_rendering && radeon->is_front_buffer_rendering) {
527                         radeon_update_renderbuffers(radeon->dri.context,
528                                 radeon->dri.context->driDrawablePriv, GL_FALSE);
529       }
530         }
531
532         radeon_draw_buffer(ctx, ctx->DrawBuffer);
533 }
534
535 void radeonReadBuffer( struct gl_context *ctx, GLenum mode )
536 {
537         if ((ctx->DrawBuffer != NULL) && (ctx->DrawBuffer->Name == 0)) {
538                 struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
539                 const GLboolean was_front_buffer_reading = rmesa->is_front_buffer_reading;
540                 rmesa->is_front_buffer_reading = (mode == GL_FRONT_LEFT)
541                                         || (mode == GL_FRONT);
542
543                 if (!was_front_buffer_reading && rmesa->is_front_buffer_reading) {
544                         radeon_update_renderbuffers(rmesa->dri.context,
545                                                     rmesa->dri.context->driReadablePriv, GL_FALSE);
546                 }
547         }
548         /* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
549         if (ctx->ReadBuffer == ctx->DrawBuffer) {
550                 /* This will update FBO completeness status.
551                  * A framebuffer will be incomplete if the GL_READ_BUFFER setting
552                  * refers to a missing renderbuffer.  Calling glReadBuffer can set
553                  * that straight and can make the drawing buffer complete.
554                  */
555                 radeon_draw_buffer(ctx, ctx->DrawBuffer);
556         }
557 }
558
559
560 /* Turn on/off page flipping according to the flags in the sarea:
561  */
562 void radeonUpdatePageFlipping(radeonContextPtr radeon)
563 {
564         struct radeon_framebuffer *rfb = radeon_get_drawable(radeon)->driverPrivate;
565
566         rfb->pf_active = radeon->sarea->pfState;
567         rfb->pf_current_page = radeon->sarea->pfCurrentPage;
568         rfb->pf_num_pages = 2;
569         radeon_flip_renderbuffers(rfb);
570         radeon_draw_buffer(radeon->glCtx, radeon->glCtx->DrawBuffer);
571 }
572
573 void radeon_window_moved(radeonContextPtr radeon)
574 {
575         /* Cliprects has to be updated before doing anything else */
576         radeonSetCliprects(radeon);
577         if (!radeon->radeonScreen->driScreen->dri2.enabled) {
578                 radeonUpdatePageFlipping(radeon);
579         }
580 }
581
/* Viewport driver hook (DRI2 only): refresh renderbuffers and re-derive
 * drawable-dependent state when the viewport changes.
 */
void radeon_viewport(struct gl_context *ctx, GLint x, GLint y, GLsizei width, GLsizei height)
{
        radeonContextPtr radeon = RADEON_CONTEXT(ctx);
        __DRIcontext *driContext = radeon->dri.context;
        void (*old_viewport)(struct gl_context *ctx, GLint x, GLint y,
                             GLsizei w, GLsizei h);

        if (!driContext->driScreenPriv->dri2.enabled)
                return;

        if (ctx->DrawBuffer->Name == 0) {
                /* Window-system buffer: flush pending front-buffer rendering
                 * before buffers may be swapped out underneath us. */
                if (radeon->is_front_buffer_rendering) {
                        ctx->Driver.Flush(ctx);
                }
                radeon_update_renderbuffers(driContext, driContext->driDrawablePriv, GL_FALSE);
                if (driContext->driDrawablePriv != driContext->driReadablePriv)
                        radeon_update_renderbuffers(driContext, driContext->driReadablePriv, GL_FALSE);
        }

        /* Temporarily clear the Viewport hook so the draw-buffer update
         * below cannot recurse back into this function; restore it after. */
        old_viewport = ctx->Driver.Viewport;
        ctx->Driver.Viewport = NULL;
        radeon_window_moved(radeon);
        radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
        ctx->Driver.Viewport = old_viewport;
}
607
608 static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
609 {
610         int i, j, reg, count;
611         int dwords;
612         uint32_t packet0;
613         if (!radeon_is_debug_enabled(RADEON_STATE, RADEON_VERBOSE) )
614                 return;
615
616         dwords = (*state->check) (radeon->glCtx, state);
617
618         fprintf(stderr, "  emit %s %d/%d\n", state->name, dwords, state->cmd_size);
619
620         if (state->cmd && radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
621                 if (dwords > state->cmd_size)
622                         dwords = state->cmd_size;
623                 for (i = 0; i < dwords;) {
624                         packet0 = state->cmd[i];
625                         reg = (packet0 & 0x1FFF) << 2;
626                         count = ((packet0 & 0x3FFF0000) >> 16) + 1;
627                         fprintf(stderr, "      %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
628                                         state->name, i, reg, count);
629                         ++i;
630                         for (j = 0; j < count && i < dwords; j++) {
631                                 fprintf(stderr, "      %s[%d]: 0x%04x = %08x\n",
632                                                 state->name, i, reg, state->cmd[i]);
633                                 reg += 4;
634                                 ++i;
635                         }
636                 }
637         }
638 }
639
640 /**
641  * Count total size for next state emit.
642  **/
643 GLuint radeonCountStateEmitSize(radeonContextPtr radeon)
644 {
645         struct radeon_state_atom *atom;
646         GLuint dwords = 0;
647         /* check if we are going to emit full state */
648
649         if (radeon->cmdbuf.cs->cdw && !radeon->hw.all_dirty) {
650                 if (!radeon->hw.is_dirty)
651                         goto out;
652                 foreach(atom, &radeon->hw.atomlist) {
653                         if (atom->dirty) {
654                                 const GLuint atom_size = atom->check(radeon->glCtx, atom);
655                                 dwords += atom_size;
656                                 if (RADEON_CMDBUF && atom_size) {
657                                         radeon_print_state_atom(radeon, atom);
658                                 }
659                         }
660                 }
661         } else {
662                 foreach(atom, &radeon->hw.atomlist) {
663                         const GLuint atom_size = atom->check(radeon->glCtx, atom);
664                         dwords += atom_size;
665                         if (RADEON_CMDBUF && atom_size) {
666                                 radeon_print_state_atom(radeon, atom);
667                         }
668
669                 }
670         }
671 out:
672         radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %u\n", __func__, dwords);
673         return dwords;
674 }
675
/* Emit a single state atom into the command stream: use the atom's own
 * emit callback when present, otherwise copy its raw command table.
 * Clears the atom's dirty flag on emit; skips atoms whose check()
 * reports zero dwords.
 */
static INLINE void radeon_emit_atom(radeonContextPtr radeon, struct radeon_state_atom *atom)
{
        BATCH_LOCALS(radeon);
        int dwords;

        /* check() reports how many dwords this atom currently needs. */
        dwords = (*atom->check) (radeon->glCtx, atom);
        if (dwords) {

                radeon_print_state_atom(radeon, atom);

                if (atom->emit) {
                        (*atom->emit)(radeon->glCtx, atom);
                } else {
                        /* Raw table copy via the batch macros. */
                        BEGIN_BATCH_NO_AUTOSTATE(dwords);
                        OUT_BATCH_TABLE(atom->cmd, dwords);
                        END_BATCH();
                }
                atom->dirty = GL_FALSE;

        } else {
                radeon_print(RADEON_STATE, RADEON_VERBOSE, "  skip state %s\n", atom->name);
        }

}
700
701 static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean emitAll)
702 {
703         struct radeon_state_atom *atom;
704
705         if (radeon->vtbl.pre_emit_atoms)
706                 radeon->vtbl.pre_emit_atoms(radeon);
707
708         /* Emit actual atoms */
709         if (radeon->hw.all_dirty || emitAll) {
710                 foreach(atom, &radeon->hw.atomlist)
711                         radeon_emit_atom( radeon, atom );
712         } else {
713                 foreach(atom, &radeon->hw.atomlist) {
714                         if ( atom->dirty )
715                                 radeon_emit_atom( radeon, atom );
716                 }
717         }
718
719         COMMIT_BATCH();
720 }
721
722 static GLboolean radeon_revalidate_bos(struct gl_context *ctx)
723 {
724         radeonContextPtr radeon = RADEON_CONTEXT(ctx);
725         int ret;
726
727         ret = radeon_cs_space_check(radeon->cmdbuf.cs);
728         if (ret == RADEON_CS_SPACE_FLUSH)
729                 return GL_FALSE;
730         return GL_TRUE;
731 }
732
733 void radeonEmitState(radeonContextPtr radeon)
734 {
735         radeon_print(RADEON_STATE, RADEON_NORMAL, "%s\n", __FUNCTION__);
736
737         if (radeon->vtbl.pre_emit_state)
738                 radeon->vtbl.pre_emit_state(radeon);
739
740         /* this code used to return here but now it emits zbs */
741         if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
742                 return;
743
744         if (!radeon->cmdbuf.cs->cdw) {
745                 if (RADEON_DEBUG & RADEON_STATE)
746                         fprintf(stderr, "Begin reemit state\n");
747
748                 radeonEmitAtoms(radeon, GL_TRUE);
749         } else {
750
751                 if (RADEON_DEBUG & RADEON_STATE)
752                         fprintf(stderr, "Begin dirty state\n");
753
754                 radeonEmitAtoms(radeon, GL_FALSE);
755         }
756
757         radeon->hw.is_dirty = GL_FALSE;
758         radeon->hw.all_dirty = GL_FALSE;
759 }
760
761
762 void radeonFlush(struct gl_context *ctx)
763 {
764         radeonContextPtr radeon = RADEON_CONTEXT(ctx);
765         if (RADEON_DEBUG & RADEON_IOCTL)
766                 fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);
767
768         /* okay if we have no cmds in the buffer &&
769            we have no DMA flush &&
770            we have no DMA buffer allocated.
771            then no point flushing anything at all.
772         */
773         if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && is_empty_list(&radeon->dma.reserved))
774                 goto flush_front;
775
776         if (radeon->dma.flush)
777                 radeon->dma.flush( ctx );
778
779         if (radeon->cmdbuf.cs->cdw)
780                 rcommonFlushCmdBuf(radeon, __FUNCTION__);
781
782 flush_front:
783         if ((ctx->DrawBuffer->Name == 0) && radeon->front_buffer_dirty) {
784                 __DRIscreen *const screen = radeon->radeonScreen->driScreen;
785
786                 if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
787                         && (screen->dri2.loader->flushFrontBuffer != NULL)) {
788                         __DRIdrawable * drawable = radeon_get_drawable(radeon);
789
790                         /* We set the dirty bit in radeon_prepare_render() if we're
791                          * front buffer rendering once we get there.
792                          */
793                         radeon->front_buffer_dirty = GL_FALSE;
794
795                         (*screen->dri2.loader->flushFrontBuffer)(drawable, drawable->loaderPrivate);
796                 }
797         }
798 }
799
800 /* Make sure all commands have been sent to the hardware and have
801  * completed processing.
802  */
803 void radeonFinish(struct gl_context * ctx)
804 {
805         radeonContextPtr radeon = RADEON_CONTEXT(ctx);
806         struct gl_framebuffer *fb = ctx->DrawBuffer;
807         struct radeon_renderbuffer *rrb;
808         int i;
809
810         if (ctx->Driver.Flush)
811                 ctx->Driver.Flush(ctx); /* +r6/r7 */
812
813         for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
814                 struct radeon_renderbuffer *rrb;
815                 rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
816                 if (rrb && rrb->bo)
817                         radeon_bo_wait(rrb->bo);
818         }
819         rrb = radeon_get_depthbuffer(radeon);
820         if (rrb && rrb->bo)
821                 radeon_bo_wait(rrb->bo);
822 }
823
824 /* cmdbuffer */
825 /**
826  * Send the current command buffer via ioctl to the hardware.
827  */
828 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
829 {
830         int ret = 0;
831
832         if (rmesa->cmdbuf.flushing) {
833                 fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
834                 exit(-1);
835         }
836         rmesa->cmdbuf.flushing = 1;
837
838         if (RADEON_DEBUG & RADEON_IOCTL) {
839                 fprintf(stderr, "%s from %s - %i cliprects\n",
840                         __FUNCTION__, caller, rmesa->numClipRects);
841         }
842
843         radeonEmitQueryEnd(rmesa->glCtx);
844
845         if (rmesa->cmdbuf.cs->cdw) {
846                 ret = radeon_cs_emit(rmesa->cmdbuf.cs);
847                 rmesa->hw.all_dirty = GL_TRUE;
848         }
849         radeon_cs_erase(rmesa->cmdbuf.cs);
850         rmesa->cmdbuf.flushing = 0;
851
852         if (radeon_revalidate_bos(rmesa->glCtx) == GL_FALSE) {
853                 fprintf(stderr,"failed to revalidate buffers\n");
854         }
855
856         return ret;
857 }
858
859 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
860 {
861         int ret;
862
863         radeonReleaseDmaRegions(rmesa);
864
865         LOCK_HARDWARE(rmesa);
866         ret = rcommonFlushCmdBufLocked(rmesa, caller);
867         UNLOCK_HARDWARE(rmesa);
868
869         if (ret) {
870                 fprintf(stderr, "drmRadeonCmdBuffer: %d. Kernel failed to "
871                                 "parse or rejected command stream. See dmesg "
872                                 "for more info.\n", ret);
873                 exit(ret);
874         }
875
876         return ret;
877 }
878
879 /**
880  * Make sure that enough space is available in the command buffer
881  * by flushing if necessary.
882  *
883  * \param dwords The number of dwords we need to be free on the command buffer
884  */
885 GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
886 {
887    if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size
888          || radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
889       /* If we try to flush empty buffer there is too big rendering operation. */
890       assert(rmesa->cmdbuf.cs->cdw);
891       rcommonFlushCmdBuf(rmesa, caller);
892       return GL_TRUE;
893    }
894    return GL_FALSE;
895 }
896
/**
 * Allocate and initialize the context's command stream.
 *
 * Sizes the buffer from the "command_buffer_size" driconf option (in
 * 256-dword units), creates the GEM CS manager and command stream, wires
 * the driver Flush callback as the CS space-flush hook, and queries the
 * kernel for VRAM/GTT limits via DRM_RADEON_GEM_INFO.
 */
void rcommonInitCmdBuf(radeonContextPtr rmesa)
{
	GLuint size;
	struct drm_radeon_gem_info mminfo = { 0 };

	/* Initialize command buffer */
	/* size is counted in dwords (it is printed as size * 4 bytes below). */
	size = 256 * driQueryOptioni(&rmesa->optionCache,
				     "command_buffer_size");
	/* Ensure room for at least two full state emissions plus slack. */
	if (size < 2 * rmesa->hw.max_state_size) {
		size = 2 * rmesa->hw.max_state_size + 65535;
	}
	/* NOTE(review): this clamp (64*256 = 16384 dwords) can undercut the
	 * 2*max_state_size floor applied just above — presumably max_state_size
	 * never approaches that in practice; verify before changing. */
	if (size > 64 * 256)
		size = 64 * 256;

	radeon_print(RADEON_CS, RADEON_VERBOSE,
			"sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t));
	radeon_print(RADEON_CS, RADEON_VERBOSE,
			"sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t));
	radeon_print(RADEON_CS, RADEON_VERBOSE,
			"Allocating %d bytes command buffer (max state is %d bytes)\n",
			size * 4, rmesa->hw.max_state_size * 4);

	rmesa->cmdbuf.csm =
		radeon_cs_manager_gem_ctor(rmesa->radeonScreen->driScreen->fd);
	if (rmesa->cmdbuf.csm == NULL) {
		/* FIXME: fatal error */
		return;
	}
	rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
	assert(rmesa->cmdbuf.cs != NULL);
	rmesa->cmdbuf.size = size;

	/* When the CS runs out of space, flush through the driver hook. */
	radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
				  (void (*)(void *))rmesa->glCtx->Driver.Flush, rmesa->glCtx);


	/* drmCommandWriteRead returns 0 on success; on failure the CS keeps
	 * its default (unlimited) domain limits. */
	if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO,
				 &mminfo, sizeof(mminfo))) {
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM,
				    mminfo.vram_visible);
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT,
				    mminfo.gart_size);
	}
}
941
942 /**
943  * Destroy the command buffer
944  */
945 void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
946 {
947         radeon_cs_destroy(rmesa->cmdbuf.cs);
948         radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
949 }
950
951 void rcommonBeginBatch(radeonContextPtr rmesa, int n,
952                        int dostate,
953                        const char *file,
954                        const char *function,
955                        int line)
956 {
957         radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
958
959     radeon_print(RADEON_CS, RADEON_VERBOSE, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
960                         n, rmesa->cmdbuf.cs->cdw, function, line);
961
962 }
963
/* Clear the buffers selected by \p mask by delegating to the shared
 * meta-ops clear implementation. */
void radeonUserClear(struct gl_context *ctx, GLuint mask)
{
   _mesa_meta_Clear(ctx, mask);
}