1 #include "pipe/p_state.h"
2 #include "pipe/p_defines.h"
3 #include "util/u_inlines.h"
4 #include "util/u_format.h"
5 #include "util/u_math.h"
7 #include "nvfx_context.h"
8 #include "nv04_surface_2d.h"
/* Compute the in-memory layout of a miptree: per-level pitch, per-face (or
 * per-slice) image offsets, and the total buffer size in bytes.
 * NOTE(review): this extract is missing several source lines (braces, the
 * declarations of l/f/offset/nr_faces, and else branches) — comments below
 * describe only what the visible code establishes. */
13 nvfx_miptree_layout(struct nvfx_miptree *mt)
15 struct pipe_texture *pt = &mt->base;
16 uint width = pt->width0;
/* Usages for which a linear texture must keep a 64-byte-aligned pitch
 * (sampling, render target, scanout, etc.). */
19 uint wide_pitch = pt->tex_usage & (PIPE_TEXTURE_USAGE_SAMPLER |
20 PIPE_TEXTURE_USAGE_DEPTH_STENCIL |
21 PIPE_TEXTURE_USAGE_RENDER_TARGET |
22 PIPE_TEXTURE_USAGE_DISPLAY_TARGET |
23 PIPE_TEXTURE_USAGE_SCANOUT);
/* Cube maps iterate over 6 faces; 3D textures treat each depth slice as a
 * "face" for offset bookkeeping (nr_faces assignment for cube is elided). */
25 if (pt->target == PIPE_TEXTURE_CUBE) {
28 if (pt->target == PIPE_TEXTURE_3D) {
29 nr_faces = pt->depth0;
/* First pass: compute each level's pitch and allocate its offset array. */
34 for (l = 0; l <= pt->last_level; l++) {
/* Wide-usage linear textures use the level-0 stride for every level,
 * rounded up to 64 bytes; otherwise (elided else) the minified stride. */
35 if (wide_pitch && (pt->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR))
36 mt->level[l].pitch = align(util_format_get_stride(pt->format, pt->width0), 64);
38 mt->level[l].pitch = util_format_get_stride(pt->format, width);
/* One offset per face/slice at this mip level. */
40 mt->level[l].image_offset =
41 CALLOC(nr_faces, sizeof(unsigned));
43 width = u_minify(width, 1);
/* Second pass: lay faces out consecutively, assigning byte offsets for
 * every level except the last inside the inner loop. */
46 for (f = 0; f < nr_faces; f++) {
47 for (l = 0; l < pt->last_level; l++) {
48 mt->level[l].image_offset[f] = offset;
/* Swizzled (non-linear) levels whose next mip is still larger than 1x1
 * get their size padded to a 64-byte boundary; the elided else adds the
 * unpadded level size. */
50 if (!(pt->tex_usage & NOUVEAU_TEXTURE_USAGE_LINEAR) &&
51 u_minify(pt->width0, l + 1) > 1 && u_minify(pt->height0, l + 1) > 1)
52 offset += align(mt->level[l].pitch * u_minify(pt->height0, l), 64);
54 offset += mt->level[l].pitch * u_minify(pt->height0, l);
/* Last mip level of this face: recorded here, after the inner loop. */
57 mt->level[l].image_offset[f] = offset;
58 offset += mt->level[l].pitch * u_minify(pt->height0, l);
/* Running offset after all faces/levels is the buffer size we need. */
61 mt->total_size = offset;
/* pipe_screen::texture_create — allocate an nvfx miptree for the given
 * template, decide linear vs. swizzled layout, and back it with a buffer.
 * NOTE(review): extract is missing lines (the switch header around the
 * format cases, break/default lines, NULL checks, and the return) —
 * comments describe only the visible logic. */
64 static struct pipe_texture *
65 nvfx_miptree_create(struct pipe_screen *pscreen, const struct pipe_texture *pt)
67 struct nvfx_miptree *mt;
68 unsigned buf_usage = PIPE_BUFFER_USAGE_PIXEL |
69 NOUVEAU_BUFFER_USAGE_TEXTURE;
/* -1 sentinel = "not yet read"; env var forces linear layout everywhere.
 * (The guard that checks the sentinel before re-reading is elided.) */
70 static int no_swizzle = -1;
72 no_swizzle = debug_get_bool_option("NOUVEAU_NO_SWIZZLE", FALSE);
74 mt = MALLOC(sizeof(struct nvfx_miptree));
78 pipe_reference_init(&mt->base.reference, 1);
79 mt->base.screen = pscreen;
81 /* Swizzled textures must be POT */
82 if (pt->width0 & (pt->width0 - 1) ||
83 pt->height0 & (pt->height0 - 1))
84 mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
/* Scanout / display / depth-stencil surfaces are kept linear. */
86 if (pt->tex_usage & (PIPE_TEXTURE_USAGE_SCANOUT |
87 PIPE_TEXTURE_USAGE_DISPLAY_TARGET |
88 PIPE_TEXTURE_USAGE_DEPTH_STENCIL))
89 mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
/* Dynamic textures are CPU-updated frequently; linear avoids swizzling
 * on every upload. */
91 if (pt->tex_usage & PIPE_TEXTURE_USAGE_DYNAMIC)
92 mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
/* Per-format layout decision (enclosing switch header is elided). */
95 case PIPE_FORMAT_B5G6R5_UNORM:
96 case PIPE_FORMAT_L8A8_UNORM:
97 case PIPE_FORMAT_A8_UNORM:
98 case PIPE_FORMAT_L8_UNORM:
99 case PIPE_FORMAT_I8_UNORM:
100 /* TODO: we can actually swizzle these formats on nv40, we
101 are just preserving the pre-unification behavior.
102 The whole 2D code is going to be rewritten anyway. */
103 if(nvfx_screen(pscreen)->is_nv4x) {
104 mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
107 /* TODO: Figure out which formats can be swizzled */
108 case PIPE_FORMAT_B8G8R8A8_UNORM:
109 case PIPE_FORMAT_B8G8R8X8_UNORM:
110 case PIPE_FORMAT_R16_SNORM:
/* These (and presumably the elided default case) fall back to linear;
 * the conditions guarding lines 113/117 are elided. */
113 mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
117 mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
/* Dynamic textures get a CPU-mappable backing buffer. */
121 if (pt->tex_usage & PIPE_TEXTURE_USAGE_DYNAMIC)
122 buf_usage |= PIPE_BUFFER_USAGE_CPU_READ_WRITE;
124 /* apparently we can't render to swizzled surfaces smaller than 64 bytes, so make them linear.
125 * If the user did not ask for a render target, they can still render to it, but it will cost them an extra copy.
126 * This also happens for small mipmaps of large textures. */
127 if (pt->tex_usage & PIPE_TEXTURE_USAGE_RENDER_TARGET && util_format_get_stride(pt->format, pt->width0) < 64)
128 mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
/* Compute offsets/pitches, then allocate the backing storage. */
130 nvfx_miptree_layout(mt);
132 mt->buffer = pscreen->buffer_create(pscreen, 256, buf_usage, mt->total_size);
137 mt->bo = nouveau_bo(mt->buffer);
/* Wrap an externally-created pipe_buffer (e.g. a shared/imported buffer)
 * in a miptree without allocating storage. Linear layout is assumed.
 * NOTE(review): extract is missing lines (the tail of the validity check,
 * the NULL-allocation check, and the return) — comments cover only what
 * is visible. */
141 static struct pipe_texture *
142 nvfx_miptree_blanket(struct pipe_screen *pscreen, const struct pipe_texture *pt,
143 const unsigned *stride, struct pipe_buffer *pb)
145 struct nvfx_miptree *mt;
147 /* Only supports 2D, non-mipmapped textures for the moment */
148 if (pt->target != PIPE_TEXTURE_2D || pt->last_level != 0 ||
/* CALLOC_STRUCT zero-fills, so unset levels/offsets start at 0/NULL. */
152 mt = CALLOC_STRUCT(nvfx_miptree);
157 pipe_reference_init(&mt->base.reference, 1);
158 mt->base.screen = pscreen;
/* Caller supplies the pitch; single level, single image offset (0). */
159 mt->level[0].pitch = stride[0];
160 mt->level[0].image_offset = CALLOC(1, sizeof(unsigned));
162 /* Assume whoever created this buffer expects it to be linear for now */
163 mt->base.tex_usage |= NOUVEAU_TEXTURE_USAGE_LINEAR;
/* Take a reference on the provided buffer rather than copying it. */
165 pipe_buffer_reference(&mt->buffer, pb);
166 mt->bo = nouveau_bo(mt->buffer);
/* pipe_screen::texture_destroy — drop the buffer reference and free the
 * per-level image_offset arrays. NOTE(review): the declaration of l, the
 * closing brace, and FREE(mt) are elided from this extract. */
171 nvfx_miptree_destroy(struct pipe_texture *pt)
173 struct nvfx_miptree *mt = (struct nvfx_miptree *)pt;
/* Releases (and possibly frees) the backing buffer. */
176 pipe_buffer_reference(&mt->buffer, NULL);
177 for (l = 0; l <= pt->last_level; l++) {
/* NULL check is redundant for FREE but harmless; kept as-is. */
178 if (mt->level[l].image_offset)
179 FREE(mt->level[l].image_offset);
/* pipe_screen::get_tex_surface — create a pipe_surface view of one
 * face/level/zslice of a miptree. May substitute a linear temporary for
 * small swizzled render targets. NOTE(review): the allocation NULL check,
 * else-branch lines around 210/213, and the final plain return are elided
 * from this extract. */
185 static struct pipe_surface *
186 nvfx_miptree_surface_new(struct pipe_screen *pscreen, struct pipe_texture *pt,
187 unsigned face, unsigned level, unsigned zslice,
190 struct nvfx_miptree *mt = (struct nvfx_miptree *)pt;
191 struct nv04_surface *ns;
193 ns = CALLOC_STRUCT(nv04_surface);
/* The surface holds a reference on its texture for its lifetime. */
196 pipe_texture_reference(&ns->base.texture, pt);
197 ns->base.format = pt->format;
198 ns->base.width = u_minify(pt->width0, level);
199 ns->base.height = u_minify(pt->height0, level);
200 ns->base.usage = flags;
201 pipe_reference_init(&ns->base.reference, 1);
202 ns->base.face = face;
203 ns->base.level = level;
204 ns->base.zslice = zslice;
205 ns->pitch = mt->level[level].pitch;
/* Byte offset: cube maps index by face, 3D by zslice, others use image 0
 * (else branches are elided in this extract). */
207 if (pt->target == PIPE_TEXTURE_CUBE) {
208 ns->base.offset = mt->level[level].image_offset[face];
210 if (pt->target == PIPE_TEXTURE_3D) {
211 ns->base.offset = mt->level[level].image_offset[zslice];
213 ns->base.offset = mt->level[level].image_offset[0];
216 /* create a linear temporary that we can render into if necessary.
217 * Note that ns->pitch is always a multiple of 64 for linear surfaces and swizzled surfaces are POT, so
218 * ns->pitch & 63 is equivalent to (ns->pitch < 64 && swizzled)*/
219 if((ns->pitch & 63) && (ns->base.usage & (PIPE_BUFFER_USAGE_GPU_WRITE | NOUVEAU_BUFFER_USAGE_NO_RENDER)) == PIPE_BUFFER_USAGE_GPU_WRITE)
220 return &nv04_surface_wrap_for_render(pscreen, ((struct nvfx_screen*)pscreen)->eng2d, ns)->base;
/* pipe_screen::tex_surface_destroy — tear down a surface. If the surface
 * had a linear render backing (see surface_new), copy the rendered pixels
 * back into the real (swizzled) surface, then destroy the backing
 * recursively. NOTE(review): the `if (ns->backing)` guard and FREE(ns)
 * lines appear to be elided from this extract. */
226 nvfx_miptree_surface_del(struct pipe_surface *ps)
228 struct nv04_surface* ns = (struct nv04_surface*)ps;
231 struct nvfx_screen* screen = (struct nvfx_screen*)ps->texture->screen;
/* Only copy back if the backing was actually rendered to. */
232 if(ns->backing->base.usage & PIPE_BUFFER_USAGE_GPU_WRITE)
233 screen->eng2d->copy(screen->eng2d, &ns->backing->base, 0, 0, ps, 0, 0, ns->base.width, ns->base.height);
/* Recursive call destroys the backing surface itself. */
234 nvfx_miptree_surface_del(&ns->backing->base);
/* Release the texture reference taken in surface_new. */
237 pipe_texture_reference(&ps->texture, NULL);
/* Install the miptree entry points into the pipe_screen vtable, plus the
 * nouveau-specific texture_blanket hook on the wrapping nouveau_screen. */
242 nvfx_screen_init_miptree_functions(struct pipe_screen *pscreen)
244 pscreen->texture_create = nvfx_miptree_create;
245 pscreen->texture_destroy = nvfx_miptree_destroy;
246 pscreen->get_tex_surface = nvfx_miptree_surface_new;
247 pscreen->tex_surface_destroy = nvfx_miptree_surface_del;
249 nouveau_screen(pscreen)->texture_blanket = nvfx_miptree_blanket;