Tizen 2.1 base
[sdk/emulator/qemu.git] / gl / mesa / src / mesa / drivers / dri / intel / intel_mipmap_tree.c
1 /**************************************************************************
2  * 
3  * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
4  * All Rights Reserved.
5  * 
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  * 
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  * 
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25  * 
26  **************************************************************************/
27
28 #include "intel_batchbuffer.h"
29 #include "intel_context.h"
30 #include "intel_mipmap_tree.h"
31 #include "intel_regions.h"
32 #include "intel_resolve_map.h"
33 #include "intel_span.h"
34 #include "intel_tex_layout.h"
35 #include "intel_tex.h"
36 #include "intel_blit.h"
37
38 #include "main/enums.h"
39 #include "main/formats.h"
40 #include "main/image.h"
41 #include "main/teximage.h"
42
43 #define FILE_DEBUG_FLAG DEBUG_MIPTREE
44
45 static GLenum
46 target_to_target(GLenum target)
47 {
48    switch (target) {
49    case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
50    case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
51    case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
52    case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
53    case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
54    case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
55       return GL_TEXTURE_CUBE_MAP_ARB;
56    default:
57       return target;
58    }
59 }
60
61 /**
62  * @param for_region Indicates that the caller is
63  *        intel_miptree_create_for_region(). If true, then do not create
64  *        \c stencil_mt.
65  */
66 static struct intel_mipmap_tree *
67 intel_miptree_create_internal(struct intel_context *intel,
68                               GLenum target,
69                               gl_format format,
70                               GLuint first_level,
71                               GLuint last_level,
72                               GLuint width0,
73                               GLuint height0,
74                               GLuint depth0,
75                               bool for_region)
76 {
77    struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
78    int compress_byte = 0;
79
80    DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__,
81        _mesa_lookup_enum_by_nr(target),
82        _mesa_get_format_name(format),
83        first_level, last_level, mt);
84
85    if (_mesa_is_format_compressed(format))
86       compress_byte = intel_compressed_num_bytes(format);
87
88    mt->target = target_to_target(target);
89    mt->format = format;
90    mt->first_level = first_level;
91    mt->last_level = last_level;
92    mt->width0 = width0;
93    mt->height0 = height0;
94    mt->cpp = compress_byte ? compress_byte : _mesa_get_format_bytes(mt->format);
95    mt->compressed = compress_byte ? 1 : 0;
96    mt->refcount = 1; 
97
98    if (target == GL_TEXTURE_CUBE_MAP) {
99       assert(depth0 == 1);
100       mt->depth0 = 6;
101    } else {
102       mt->depth0 = depth0;
103    }
104
105    if (format == MESA_FORMAT_S8) {
106       /* The stencil buffer has quirky pitch requirements.  From Vol 2a,
107        * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch":
108        *    The pitch must be set to 2x the value computed based on width, as
109        *    the stencil buffer is stored with two rows interleaved.
110        */
111       assert(intel->has_separate_stencil);
112       mt->cpp = 2;
113    }
114
115    if (!for_region &&
116        _mesa_is_depthstencil_format(_mesa_get_format_base_format(format)) &&
117        (intel->must_use_separate_stencil ||
118         (intel->has_separate_stencil &&
119          intel->vtbl.is_hiz_depth_format(intel, format)))) {
120       mt->stencil_mt = intel_miptree_create(intel,
121                                             mt->target,
122                                             MESA_FORMAT_S8,
123                                             mt->first_level,
124                                             mt->last_level,
125                                             mt->width0,
126                                             mt->height0,
127                                             mt->depth0,
128                                             true);
129       if (!mt->stencil_mt) {
130          intel_miptree_release(&mt);
131          return NULL;
132       }
133
134       /* Fix up the Z miptree format for how we're splitting out separate
135        * stencil.  Gen7 expects there to be no stencil bits in its depth buffer.
136        */
137       if (mt->format == MESA_FORMAT_S8_Z24) {
138          mt->format = MESA_FORMAT_X8_Z24;
139       } else if (mt->format == MESA_FORMAT_Z32_FLOAT_X24S8) {
140          mt->format = MESA_FORMAT_Z32_FLOAT;
141          mt->cpp = 4;
142       } else {
143          _mesa_problem(NULL, "Unknown format %s in separate stencil mt\n",
144                        _mesa_get_format_name(mt->format));
145       }
146    }
147
148    intel_get_texture_alignment_unit(intel, mt->format,
149                                     &mt->align_w, &mt->align_h);
150
151 #ifdef I915
152    (void) intel;
153    if (intel->is_945)
154       i945_miptree_layout(mt);
155    else
156       i915_miptree_layout(mt);
157 #else
158    brw_miptree_layout(intel, mt);
159 #endif
160
161    return mt;
162 }
163
164
/**
 * Allocate a complete miptree, including the drm region backing it.
 *
 * Picks a tiling mode for the surface, applies the separate-stencil
 * W-tile sizing hack for MESA_FORMAT_S8, lays out the levels via
 * intel_miptree_create_internal(), and then allocates the region.
 *
 * \return NULL on allocation failure or for a degenerate (zero-sized)
 *         texture.
 */
struct intel_mipmap_tree *
intel_miptree_create(struct intel_context *intel,
                     GLenum target,
                     gl_format format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0,
                     bool expect_accelerated_upload)
{
   struct intel_mipmap_tree *mt;
   uint32_t tiling = I915_TILING_NONE;
   GLenum base_format = _mesa_get_format_base_format(format);

   /* Compressed formats stay untiled.  Depth/depth-stencil surfaces on
    * gen4+ use Y tiling; other surfaces at least 64 pixels wide use X
    * tiling.
    */
   if (intel->use_texture_tiling && !_mesa_is_format_compressed(format)) {
      if (intel->gen >= 4 &&
          (base_format == GL_DEPTH_COMPONENT ||
           base_format == GL_DEPTH_STENCIL_EXT))
         tiling = I915_TILING_Y;
      else if (width0 >= 64)
         tiling = I915_TILING_X;
   }

   if (format == MESA_FORMAT_S8) {
      /* The stencil buffer is W tiled. However, we request from the kernel a
       * non-tiled buffer because the GTT is incapable of W fencing.
       *
       * The stencil buffer has quirky pitch requirements.  From Vol 2a,
       * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch":
       *    The pitch must be set to 2x the value computed based on width, as
       *    the stencil buffer is stored with two rows interleaved.
       * To accomplish this, we resort to the nasty hack of doubling the drm
       * region's cpp and halving its height.
       *
       * If we neglect to double the pitch, then render corruption occurs.
       */
      tiling = I915_TILING_NONE;
      width0 = ALIGN(width0, 64);
      height0 = ALIGN((height0 + 1) / 2, 64);
   }

   mt = intel_miptree_create_internal(intel, target, format,
                                      first_level, last_level, width0,
                                      height0, depth0,
                                      false);
   /*
    * pitch == 0 || height == 0  indicates the null texture
    */
   if (!mt || !mt->total_width || !mt->total_height) {
      intel_miptree_release(&mt);
      return NULL;
   }

   mt->region = intel_region_alloc(intel->intelScreen,
                                   tiling,
                                   mt->cpp,
                                   mt->total_width,
                                   mt->total_height,
                                   expect_accelerated_upload);

   if (!mt->region) {
       intel_miptree_release(&mt);
       return NULL;
   }

   return mt;
}
233
234
235 struct intel_mipmap_tree *
236 intel_miptree_create_for_region(struct intel_context *intel,
237                                 GLenum target,
238                                 gl_format format,
239                                 struct intel_region *region)
240 {
241    struct intel_mipmap_tree *mt;
242
243    mt = intel_miptree_create_internal(intel, target, format,
244                                       0, 0,
245                                       region->width, region->height, 1,
246                                       true);
247    if (!mt)
248       return mt;
249
250    intel_region_reference(&mt->region, region);
251
252    return mt;
253 }
254
255 struct intel_mipmap_tree*
256 intel_miptree_create_for_renderbuffer(struct intel_context *intel,
257                                       gl_format format,
258                                       uint32_t width,
259                                       uint32_t height)
260 {
261    struct intel_mipmap_tree *mt;
262
263    mt = intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
264                              width, height, 1, true);
265
266    return mt;
267 }
268
269 void
270 intel_miptree_reference(struct intel_mipmap_tree **dst,
271                         struct intel_mipmap_tree *src)
272 {
273    if (*dst == src)
274       return;
275
276    intel_miptree_release(dst);
277
278    if (src) {
279       src->refcount++;
280       DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
281    }
282
283    *dst = src;
284 }
285
286
/**
 * Drop one reference to *mt and NULL out the caller's pointer.
 *
 * When the refcount reaches zero the tree is destroyed: its region is
 * released, the separate stencil and HiZ trees are recursively released,
 * the HiZ resolve map is cleared, and every level's slice array is freed.
 */
void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __FUNCTION__, *mt);

      intel_region_release(&((*mt)->region));
      intel_miptree_release(&(*mt)->stencil_mt);
      intel_miptree_release(&(*mt)->hiz_mt);
      intel_resolve_map_clear(&(*mt)->hiz_map);

      /* Free the per-level slice offset arrays allocated by
       * intel_miptree_set_level_info().
       */
      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
         free((*mt)->level[i].slice);
      }

      free(*mt);
   }
   *mt = NULL;
}
312
313 void
314 intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
315                                        int *width, int *height, int *depth)
316 {
317    switch (image->TexObject->Target) {
318    case GL_TEXTURE_1D_ARRAY:
319       *width = image->Width;
320       *height = 1;
321       *depth = image->Height;
322       break;
323    default:
324       *width = image->Width;
325       *height = image->Height;
326       *depth = image->Depth;
327       break;
328    }
329 }
330
/**
 * Can the image be pulled into a unified mipmap tree?  This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   if (target_to_target(image->TexObject->Target) != mt->target)
      return false;

   /* Formats must match, with one exception: an S8_Z24 image may live in
    * an X8_Z24 tree when the stencil bits are held in a separate miptree.
    */
   if (image->TexFormat != mt->format &&
       !(image->TexFormat == MESA_FORMAT_S8_Z24 &&
         mt->format == MESA_FORMAT_X8_Z24 &&
         mt->stencil_mt)) {
      return false;
   }

   intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

   /* Cube trees are stored with depth0 == 6 (one layer per face). */
   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   /* Test image dimensions against the base level image adjusted for
    * minification.  This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (width != mt->level[level].width ||
       height != mt->level[level].height ||
       depth != mt->level[level].depth)
      return false;

   return true;
}
371
372
/**
 * Record the position (x, y) and size (w, h, d) of mipmap level \c level
 * within the tree, and allocate its per-slice offset array (d entries).
 *
 * Must be called at most once per level: the slice array must not already
 * exist.
 */
void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint x, GLuint y,
                             GLuint w, GLuint h, GLuint d)
{
   mt->level[level].width = w;
   mt->level[level].height = h;
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__,
       level, w, h, d, x, y);

   assert(mt->level[level].slice == NULL);

   /* NOTE(review): the calloc result is dereferenced unchecked on the next
    * line, so an OOM here crashes.  Callers appear to assume allocation
    * succeeds — confirm whether a failure path is needed.
    */
   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}
394
395
396 void
397 intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
398                                GLuint level, GLuint img,
399                                GLuint x, GLuint y)
400 {
401    if (img == 0 && level == 0)
402       assert(x == 0 && y == 0);
403
404    assert(img < mt->level[level].depth);
405
406    mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
407    mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;
408
409    DBG("%s level %d img %d pos %d,%d\n",
410        __FUNCTION__, level, img,
411        mt->level[level].slice[img].x_offset,
412        mt->level[level].slice[img].y_offset);
413 }
414
415
/**
 * For cube map textures, either the \c face parameter can be used, of course,
 * or the cube face can be interpreted as a depth layer and the \c layer
 * parameter used.
 *
 * Returns in (*x, *y) the absolute position of the requested slice within
 * the surface.
 */
void
intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint face, GLuint layer,
                               GLuint *x, GLuint *y)
{
   int slice;

   if (face > 0) {
      /* Addressing by cube face; the layer parameter must not also be used. */
      assert(mt->target == GL_TEXTURE_CUBE_MAP);
      assert(face < 6);
      assert(layer == 0);
      slice = face;
   } else {
      /* This branch may be taken even if the texture target is a cube map. In
       * that case, the caller chose to interpret each cube face as a layer.
       */
      assert(face == 0);
      slice = layer;
   }

   /* Slice offsets are stored absolute; see intel_miptree_set_image_offset(). */
   *x = mt->level[level].slice[slice].x_offset;
   *y = mt->level[level].slice[slice].y_offset;
}
444
/**
 * Copy one slice of one mip level from src_mt into dst_mt.
 *
 * Tries the blitter first; if the blit fails, falls back to a CPU copy
 * through mapped regions.  When a separate stencil tree exists, recurses
 * to copy the matching stencil slice as well.
 */
static void
intel_miptree_copy_slice(struct intel_context *intel,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
                         int face,
                         int depth)

{
   gl_format format = src_mt->format;
   uint32_t width = src_mt->level[level].width;
   uint32_t height = src_mt->level[level].height;

   assert(depth < src_mt->level[level].depth);

   /* For compressed formats, convert pixel dimensions into block
    * dimensions before blitting.
    */
   if (dst_mt->compressed) {
      height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
      width = ALIGN(width, dst_mt->align_w);
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, face, depth,
                                  &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, face, depth,
                                  &src_x, &src_y);

   DBG("validate blit mt %p %d,%d/%d -> mt %p %d,%d/%d (%dx%d)\n",
       src_mt, src_x, src_y, src_mt->region->pitch * src_mt->region->cpp,
       dst_mt, dst_x, dst_y, dst_mt->region->pitch * dst_mt->region->cpp,
       width, height);

   if (!intelEmitCopyBlit(intel,
                          dst_mt->region->cpp,
                          src_mt->region->pitch, src_mt->region->bo,
                          0, src_mt->region->tiling,
                          dst_mt->region->pitch, dst_mt->region->bo,
                          0, dst_mt->region->tiling,
                          src_x, src_y,
                          dst_x, dst_y,
                          width, height,
                          GL_COPY)) {

      /* Blitter path failed; do the copy on the CPU instead. */
      fallback_debug("miptree validate blit for %s failed\n",
                     _mesa_get_format_name(format));
      void *dst = intel_region_map(intel, dst_mt->region, GL_MAP_WRITE_BIT);
      void *src = intel_region_map(intel, src_mt->region, GL_MAP_READ_BIT);

      _mesa_copy_rect(dst,
                      dst_mt->cpp,
                      dst_mt->region->pitch,
                      dst_x, dst_y,
                      width, height,
                      src, src_mt->region->pitch,
                      src_x, src_y);

      intel_region_unmap(intel, dst_mt->region);
      intel_region_unmap(intel, src_mt->region);
   }

   /* Keep the separate stencil data in sync with the depth data. */
   if (src_mt->stencil_mt) {
      intel_miptree_copy_slice(intel,
                               dst_mt->stencil_mt, src_mt->stencil_mt,
                               level, face, depth);
   }
}
510
511 /**
512  * Copies the image's current data to the given miptree, and associates that
513  * miptree with the image.
514  */
515 void
516 intel_miptree_copy_teximage(struct intel_context *intel,
517                             struct intel_texture_image *intelImage,
518                             struct intel_mipmap_tree *dst_mt)
519 {
520    struct intel_mipmap_tree *src_mt = intelImage->mt;
521    int level = intelImage->base.Base.Level;
522    int face = intelImage->base.Base.Face;
523    GLuint depth = intelImage->base.Base.Depth;
524
525    for (int slice = 0; slice < depth; slice++) {
526       intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
527    }
528
529    intel_miptree_reference(&intelImage->mt, dst_mt);
530 }
531
/**
 * Allocate the HiZ auxiliary miptree for \c mt, then mark every
 * (level, layer) slice as needing a HiZ resolve.
 *
 * \return false if the HiZ miptree could not be created.
 */
bool
intel_miptree_alloc_hiz(struct intel_context *intel,
                        struct intel_mipmap_tree *mt)
{
   assert(mt->hiz_mt == NULL);
   /* The HiZ buffer mirrors the depth tree's dimensions, stored as X8_Z24. */
   mt->hiz_mt = intel_miptree_create(intel,
                                     mt->target,
                                     MESA_FORMAT_X8_Z24,
                                     mt->first_level,
                                     mt->last_level,
                                     mt->width0,
                                     mt->height0,
                                     mt->depth0,
                                     true);

   if (!mt->hiz_mt)
      return false;

   /* Mark that all slices need a HiZ resolve. */
   struct intel_resolve_map *head = &mt->hiz_map;
   for (int level = mt->first_level; level <= mt->last_level; ++level) {
      for (int layer = 0; layer < mt->level[level].depth; ++layer) {
         /* NOTE(review): malloc result is used unchecked; an OOM here would
          * crash.  Confirm whether a failure path is needed.
          */
         head->next = malloc(sizeof(*head->next));
         head->next->prev = head;
         head->next->next = NULL;
         head = head->next;

         head->level = level;
         head->layer = layer;
         head->need = INTEL_NEED_HIZ_RESOLVE;
      }
   }

   return true;
}
567
568 void
569 intel_miptree_slice_set_needs_hiz_resolve(struct intel_mipmap_tree *mt,
570                                           uint32_t level,
571                                           uint32_t layer)
572 {
573    intel_miptree_check_level_layer(mt, level, layer);
574
575    if (!mt->hiz_mt)
576       return;
577
578    intel_resolve_map_set(&mt->hiz_map,
579                          level, layer, INTEL_NEED_HIZ_RESOLVE);
580 }
581
582
583 void
584 intel_miptree_slice_set_needs_depth_resolve(struct intel_mipmap_tree *mt,
585                                             uint32_t level,
586                                             uint32_t layer)
587 {
588    intel_miptree_check_level_layer(mt, level, layer);
589
590    if (!mt->hiz_mt)
591       return;
592
593    intel_resolve_map_set(&mt->hiz_map,
594                          level, layer, INTEL_NEED_DEPTH_RESOLVE);
595 }
596
/* Signature shared by the per-slice HiZ and depth resolve vtbl hooks. */
typedef void (*resolve_func_t)(struct intel_context *intel,
                               struct intel_mipmap_tree *mt,
                               uint32_t level,
                               uint32_t layer);

/**
 * If the given slice is flagged as needing the given kind of resolve,
 * perform it via \c func and remove the flag from the resolve map.
 *
 * \return true if a resolve was performed.
 */
static bool
intel_miptree_slice_resolve(struct intel_context *intel,
                            struct intel_mipmap_tree *mt,
                            uint32_t level,
                            uint32_t layer,
                            enum intel_need_resolve need,
                            resolve_func_t func)
{
   intel_miptree_check_level_layer(mt, level, layer);

   struct intel_resolve_map *item =
         intel_resolve_map_get(&mt->hiz_map, level, layer);

   /* Nothing pending, or a different kind of resolve is pending. */
   if (!item || item->need != need)
      return false;

   func(intel, mt, level, layer);
   intel_resolve_map_remove(item);
   return true;
}
622
623 bool
624 intel_miptree_slice_resolve_hiz(struct intel_context *intel,
625                                 struct intel_mipmap_tree *mt,
626                                 uint32_t level,
627                                 uint32_t layer)
628 {
629    return intel_miptree_slice_resolve(intel, mt, level, layer,
630                                       INTEL_NEED_HIZ_RESOLVE,
631                                       intel->vtbl.resolve_hiz_slice);
632 }
633
634 bool
635 intel_miptree_slice_resolve_depth(struct intel_context *intel,
636                                   struct intel_mipmap_tree *mt,
637                                   uint32_t level,
638                                   uint32_t layer)
639 {
640    return intel_miptree_slice_resolve(intel, mt, level, layer,
641                                       INTEL_NEED_DEPTH_RESOLVE,
642                                       intel->vtbl.resolve_depth_slice);
643 }
644
645 static bool
646 intel_miptree_all_slices_resolve(struct intel_context *intel,
647                                  struct intel_mipmap_tree *mt,
648                                  enum intel_need_resolve need,
649                                  resolve_func_t func)
650 {
651    bool did_resolve = false;
652    struct intel_resolve_map *i, *next;
653
654    for (i = mt->hiz_map.next; i; i = next) {
655       next = i->next;
656       if (i->need != need)
657          continue;
658       func(intel, mt, i->level, i->layer);
659       intel_resolve_map_remove(i);
660       did_resolve = true;
661    }
662
663    return did_resolve;
664 }
665
666 bool
667 intel_miptree_all_slices_resolve_hiz(struct intel_context *intel,
668                                      struct intel_mipmap_tree *mt)
669 {
670    return intel_miptree_all_slices_resolve(intel, mt,
671                                            INTEL_NEED_HIZ_RESOLVE,
672                                            intel->vtbl.resolve_hiz_slice);
673 }
674
675 bool
676 intel_miptree_all_slices_resolve_depth(struct intel_context *intel,
677                                        struct intel_mipmap_tree *mt)
678 {
679    return intel_miptree_all_slices_resolve(intel, mt,
680                                            INTEL_NEED_DEPTH_RESOLVE,
681                                            intel->vtbl.resolve_depth_slice);
682 }
683
/**
 * Map a (level, slice) window of the miptree directly through the GTT.
 *
 * On success map->ptr points at (map->x, map->y) within the slice and
 * map->stride is the surface pitch in bytes (region pitch * cpp); on
 * failure map->ptr is NULL.
 */
static void
intel_miptree_map_gtt(struct intel_context *intel,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks.  intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   y /= bh;

   base = intel_region_map(intel, mt->region, map->mode);

   if (base == NULL)
      map->ptr = NULL;
   else {
      /* Note that in the case of cube maps, the caller must have passed the
       * slice number referencing the face.
      */
      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
      x += image_x;
      y += image_y;

      map->stride = mt->region->pitch * mt->cpp;
      map->ptr = base + y * map->stride + x * mt->cpp;
   }

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}
725
/**
 * Release the region mapping established by intel_miptree_map_gtt().
 * The map/level/slice parameters are unused here.
 */
static void
intel_miptree_unmap_gtt(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   intel_region_unmap(intel, mt->region);
}
735
/**
 * Map a (level, slice) window by blitting it into a temporary linear BO
 * and CPU-mapping that BO.
 *
 * On any failure (BO allocation, blit, or map) map->ptr is set to NULL
 * and map->stride to 0.
 */
static void
intel_miptree_map_blit(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level, unsigned int slice)
{
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;
   int ret;

   /* The blitter requires the pitch to be aligned to 4. */
   map->stride = ALIGN(map->w * mt->region->cpp, 4);

   map->bo = drm_intel_bo_alloc(intel->bufmgr, "intel_miptree_map_blit() temp",
                                map->stride * map->h, 4096);
   if (!map->bo) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }

   /* Translate the map window into absolute surface coordinates. */
   intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
   x += image_x;
   y += image_y;

   if (!intelEmitCopyBlit(intel,
                          mt->region->cpp,
                          mt->region->pitch, mt->region->bo,
                          0, mt->region->tiling,
                          map->stride / mt->region->cpp, map->bo,
                          0, I915_TILING_NONE,
                          x, y,
                          0, 0,
                          map->w, map->h,
                          GL_COPY)) {
      fprintf(stderr, "Failed to blit\n");
      goto fail;
   }

   /* Submit the blit before mapping the temporary for CPU access. */
   intel_batchbuffer_flush(intel);
   ret = drm_intel_bo_map(map->bo, (map->mode & GL_MAP_WRITE_BIT) != 0);
   if (ret) {
      fprintf(stderr, "Failed to map blit temporary\n");
      goto fail;
   }

   map->ptr = map->bo->virtual;

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);

   return;

fail:
   /* Shared cleanup path: drop the BO (if any) and signal failure. */
   drm_intel_bo_unreference(map->bo);
   map->ptr = NULL;
   map->stride = 0;
}
796
/**
 * Release the temporary BO created by intel_miptree_map_blit().
 *
 * No write-back is performed; the assert documents that write mappings
 * are not supported through the blit path.
 */
static void
intel_miptree_unmap_blit(struct intel_context *intel,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
{
   assert(!(map->mode & GL_MAP_WRITE_BIT));

   drm_intel_bo_unmap(map->bo);
   drm_intel_bo_unreference(map->bo);
}
809
/**
 * Map a (level, slice) window of a separate stencil (S8) miptree.
 *
 * The stencil buffer is W tiled, which the GTT cannot express, so the
 * data is detiled by the CPU into a malloc'ed linear temporary
 * (map->buffer).  On malloc failure map->ptr is left NULL.
 */
static void
intel_miptree_map_s8(struct intel_context *intel,
                     struct intel_mipmap_tree *mt,
                     struct intel_miptree_map *map,
                     unsigned int level, unsigned int slice)
{
   map->stride = map->w;
   map->buffer = map->ptr = malloc(map->stride * map->h);
   if (!map->buffer)
      return;

   /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
    * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
    * invalidate is set, since we'll be writing the whole rectangle from our
    * temporary buffer back out.
    */
   if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
      uint8_t *untiled_s8_map = map->ptr;
      uint8_t *tiled_s8_map = intel_region_map(intel, mt->region,
                                               GL_MAP_READ_BIT);
      unsigned int image_x, image_y;

      intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);

      /* Detile byte-by-byte: intel_offset_S8() computes the W-tiled
       * address of each pixel within the region.
       */
      for (uint32_t y = 0; y < map->h; y++) {
         for (uint32_t x = 0; x < map->w; x++) {
            ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
                                               x + image_x + map->x,
                                               y + image_y + map->y,
                                               intel->has_swizzling);
            untiled_s8_map[y * map->w + x] = tiled_s8_map[offset];
         }
      }

      intel_region_unmap(intel, mt->region);

      DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->x + image_x, map->y + image_y, map->ptr, map->stride);
   } else {
      DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
          map->x, map->y, map->w, map->h,
          mt, map->ptr, map->stride);
   }
}
855
856 static void
857 intel_miptree_unmap_s8(struct intel_context *intel,
858                        struct intel_mipmap_tree *mt,
859                        struct intel_miptree_map *map,
860                        unsigned int level,
861                        unsigned int slice)
862 {
863    if (map->mode & GL_MAP_WRITE_BIT) {
864       unsigned int image_x, image_y;
865       uint8_t *untiled_s8_map = map->ptr;
866       uint8_t *tiled_s8_map = intel_region_map(intel, mt->region, map->mode);
867
868       intel_miptree_get_image_offset(mt, level, 0, slice, &image_x, &image_y);
869
870       for (uint32_t y = 0; y < map->h; y++) {
871          for (uint32_t x = 0; x < map->w; x++) {
872             ptrdiff_t offset = intel_offset_S8(mt->region->pitch,
873                                                x + map->x,
874                                                y + map->y,
875                                                intel->has_swizzling);
876             tiled_s8_map[offset] = untiled_s8_map[y * map->w + x];
877          }
878       }
879
880       intel_region_unmap(intel, mt->region);
881    }
882
883    free(map->buffer);
884 }
885
886 /**
887  * Mapping function for packed depth/stencil miptrees backed by real separate
888  * miptrees for depth and stencil.
889  *
890  * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer
891  * separate from the depth buffer.  Yet at the GL API level, we have to expose
892  * packed depth/stencil textures and FBO attachments, and Mesa core expects to
893  * be able to map that memory for texture storage and glReadPixels-type
894  * operations.  We give Mesa core that access by mallocing a temporary and
895  * copying the data between the actual backing store and the temporary.
896  */
897 static void
898 intel_miptree_map_depthstencil(struct intel_context *intel,
899                                struct intel_mipmap_tree *mt,
900                                struct intel_miptree_map *map,
901                                unsigned int level, unsigned int slice)
902 {
903    struct intel_mipmap_tree *z_mt = mt;
904    struct intel_mipmap_tree *s_mt = mt->stencil_mt;
905    bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;
906    int packed_bpp = map_z32f_x24s8 ? 8 : 4;
907
908    map->stride = map->w * packed_bpp;
909    map->buffer = map->ptr = malloc(map->stride * map->h);
910    if (!map->buffer)
911       return;
912
913    /* One of either READ_BIT or WRITE_BIT or both is set.  READ_BIT implies no
914     * INVALIDATE_RANGE_BIT.  WRITE_BIT needs the original values read in unless
915     * invalidate is set, since we'll be writing the whole rectangle from our
916     * temporary buffer back out.
917     */
918    if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) {
919       uint32_t *packed_map = map->ptr;
920       uint8_t *s_map = intel_region_map(intel, s_mt->region, GL_MAP_READ_BIT);
921       uint32_t *z_map = intel_region_map(intel, z_mt->region, GL_MAP_READ_BIT);
922       unsigned int s_image_x, s_image_y;
923       unsigned int z_image_x, z_image_y;
924
925       intel_miptree_get_image_offset(s_mt, level, 0, slice,
926                                      &s_image_x, &s_image_y);
927       intel_miptree_get_image_offset(z_mt, level, 0, slice,
928                                      &z_image_x, &z_image_y);
929
930       for (uint32_t y = 0; y < map->h; y++) {
931          for (uint32_t x = 0; x < map->w; x++) {
932             int map_x = map->x + x, map_y = map->y + y;
933             ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
934                                                  map_x + s_image_x,
935                                                  map_y + s_image_y,
936                                                  intel->has_swizzling);
937             ptrdiff_t z_offset = ((map_y + z_image_y) * z_mt->region->pitch +
938                                   (map_x + z_image_x));
939             uint8_t s = s_map[s_offset];
940             uint32_t z = z_map[z_offset];
941
942             if (map_z32f_x24s8) {
943                packed_map[(y * map->w + x) * 2 + 0] = z;
944                packed_map[(y * map->w + x) * 2 + 1] = s;
945             } else {
946                packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff);
947             }
948          }
949       }
950
951       intel_region_unmap(intel, s_mt->region);
952       intel_region_unmap(intel, z_mt->region);
953
954       DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n",
955           __FUNCTION__,
956           map->x, map->y, map->w, map->h,
957           z_mt, map->x + z_image_x, map->y + z_image_y,
958           s_mt, map->x + s_image_x, map->y + s_image_y,
959           map->ptr, map->stride);
960    } else {
961       DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __FUNCTION__,
962           map->x, map->y, map->w, map->h,
963           mt, map->ptr, map->stride);
964    }
965 }
966
967 static void
968 intel_miptree_unmap_depthstencil(struct intel_context *intel,
969                                  struct intel_mipmap_tree *mt,
970                                  struct intel_miptree_map *map,
971                                  unsigned int level,
972                                  unsigned int slice)
973 {
974    struct intel_mipmap_tree *z_mt = mt;
975    struct intel_mipmap_tree *s_mt = mt->stencil_mt;
976    bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z32_FLOAT;
977
978    if (map->mode & GL_MAP_WRITE_BIT) {
979       uint32_t *packed_map = map->ptr;
980       uint8_t *s_map = intel_region_map(intel, s_mt->region, map->mode);
981       uint32_t *z_map = intel_region_map(intel, z_mt->region, map->mode);
982       unsigned int s_image_x, s_image_y;
983       unsigned int z_image_x, z_image_y;
984
985       intel_miptree_get_image_offset(s_mt, level, 0, slice,
986                                      &s_image_x, &s_image_y);
987       intel_miptree_get_image_offset(z_mt, level, 0, slice,
988                                      &z_image_x, &z_image_y);
989
990       for (uint32_t y = 0; y < map->h; y++) {
991          for (uint32_t x = 0; x < map->w; x++) {
992             ptrdiff_t s_offset = intel_offset_S8(s_mt->region->pitch,
993                                                  x + s_image_x + map->x,
994                                                  y + s_image_y + map->y,
995                                                  intel->has_swizzling);
996             ptrdiff_t z_offset = ((y + z_image_y) * z_mt->region->pitch +
997                                   (x + z_image_x));
998
999             if (map_z32f_x24s8) {
1000                z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0];
1001                s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1];
1002             } else {
1003                uint32_t packed = packed_map[y * map->w + x];
1004                s_map[s_offset] = packed >> 24;
1005                z_map[z_offset] = packed;
1006             }
1007          }
1008       }
1009
1010       intel_region_unmap(intel, s_mt->region);
1011       intel_region_unmap(intel, z_mt->region);
1012
1013       DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n",
1014           __FUNCTION__,
1015           map->x, map->y, map->w, map->h,
1016           z_mt, _mesa_get_format_name(z_mt->format),
1017           map->x + z_image_x, map->y + z_image_y,
1018           s_mt, map->x + s_image_x, map->y + s_image_y,
1019           map->ptr, map->stride);
1020    }
1021
1022    free(map->buffer);
1023 }
1024
1025 void
1026 intel_miptree_map(struct intel_context *intel,
1027                   struct intel_mipmap_tree *mt,
1028                   unsigned int level,
1029                   unsigned int slice,
1030                   unsigned int x,
1031                   unsigned int y,
1032                   unsigned int w,
1033                   unsigned int h,
1034                   GLbitfield mode,
1035                   void **out_ptr,
1036                   int *out_stride)
1037 {
1038    struct intel_miptree_map *map;
1039
1040    map = calloc(1, sizeof(struct intel_miptree_map));
1041    if (!map){
1042       *out_ptr = NULL;
1043       *out_stride = 0;
1044       return;
1045    }
1046
1047    assert(!mt->level[level].slice[slice].map);
1048    mt->level[level].slice[slice].map = map;
1049    map->mode = mode;
1050    map->x = x;
1051    map->y = y;
1052    map->w = w;
1053    map->h = h;
1054
1055    intel_miptree_slice_resolve_depth(intel, mt, level, slice);
1056    if (map->mode & GL_MAP_WRITE_BIT) {
1057       intel_miptree_slice_set_needs_hiz_resolve(mt, level, slice);
1058    }
1059
1060    if (mt->format == MESA_FORMAT_S8) {
1061       intel_miptree_map_s8(intel, mt, map, level, slice);
1062    } else if (mt->stencil_mt) {
1063       intel_miptree_map_depthstencil(intel, mt, map, level, slice);
1064    } else if (intel->has_llc &&
1065               !(mode & GL_MAP_WRITE_BIT) &&
1066               !mt->compressed &&
1067               mt->region->tiling == I915_TILING_X) {
1068       intel_miptree_map_blit(intel, mt, map, level, slice);
1069    } else {
1070       intel_miptree_map_gtt(intel, mt, map, level, slice);
1071    }
1072
1073    *out_ptr = map->ptr;
1074    *out_stride = map->stride;
1075
1076    if (map->ptr == NULL) {
1077       mt->level[level].slice[slice].map = NULL;
1078       free(map);
1079    }
1080 }
1081
1082 void
1083 intel_miptree_unmap(struct intel_context *intel,
1084                     struct intel_mipmap_tree *mt,
1085                     unsigned int level,
1086                     unsigned int slice)
1087 {
1088    struct intel_miptree_map *map = mt->level[level].slice[slice].map;
1089
1090    if (!map)
1091       return;
1092
1093    DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__,
1094        mt, _mesa_get_format_name(mt->format), level, slice);
1095
1096    if (mt->format == MESA_FORMAT_S8) {
1097       intel_miptree_unmap_s8(intel, mt, map, level, slice);
1098    } else if (mt->stencil_mt) {
1099       intel_miptree_unmap_depthstencil(intel, mt, map, level, slice);
1100    } else if (map->bo) {
1101       intel_miptree_unmap_blit(intel, mt, map, level, slice);
1102    } else {
1103       intel_miptree_unmap_gtt(intel, mt, map, level, slice);
1104    }
1105
1106    mt->level[level].slice[slice].map = NULL;
1107    free(map);
1108 }