1 /**************************************************************************
3 * Copyright 2007-2010 VMware, Inc.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
 * Rasterization for binned triangles within a tile
 * Prototype for an 8-plane rasterizer function. Will codegenerate
 * XXX: Variants for more/fewer planes.
 * XXX: Need ways of dropping planes as we descend.
/**
 * Shade a 4x4 pixel block: build a coverage mask from the triangle's
 * NR_PLANES edge equations, then hand the surviving quads to the
 * fragment shader.
 *
 * NOTE(review): several source lines (remaining parameters, local
 * declarations, closing braces) are elided in this extraction; only
 * the visible lines are annotated.
 */
TAG(do_block_4)(struct lp_rasterizer_task *task,
                const struct lp_rast_triangle *tri,
                const struct lp_rast_plane *plane,
   /* Start with all 16 pixels of the 4x4 block assumed covered. */
   unsigned mask = 0xffff;

   /* Clear the mask bits for pixels outside each plane's half-space.
    * (Remaining build_mask_linear() arguments elided in this view.)
    */
   for (j = 0; j < NR_PLANES; j++) {
      mask &= ~build_mask_linear(c[j] - 1,

   /* Now pass to the shader:
    */
   lp_rast_shade_quads_mask(task, &tri->inputs, x, y, mask);
65 * Evaluate a 16x16 block of pixels to determine which 4x4 subblocks are in/out
66 * of the triangle's bounds.
/**
 * Evaluate a 16x16 block: classify each of the 16 4x4 sub-blocks as
 * trivially rejected, trivially accepted, or partially covered, then
 * dispatch full sub-blocks to block_full_4() and partial ones to
 * TAG(do_block_4)().
 *
 * NOTE(review): loop headers, braces and some statements are elided in
 * this extraction; only the visible lines are annotated.
 */
TAG(do_block_16)(struct lp_rasterizer_task *task,
                 const struct lp_rast_triangle *tri,
                 const struct lp_rast_plane *plane,
   unsigned outmask, inmask, partmask, partial_mask;

   outmask = 0;                /* outside one or more trivial reject planes */
   partmask = 0;               /* outside one or more trivial accept planes */

   for (j = 0; j < NR_PLANES; j++) {
      /* Edge-equation steps scaled by 4 to step one 4x4 sub-block at a
       * time; eo/ei give the trivial-reject / trivial-accept corner
       * offsets for a sub-block.
       */
      const int dcdx = -plane[j].dcdx * 4;
      const int dcdy = plane[j].dcdy * 4;
      const int cox = plane[j].eo * 4;
      const int ei = plane[j].dcdy - plane[j].dcdx - plane[j].eo;
      const int cio = ei * 4 - 1;

      build_masks(c[j] + cox,
                  &outmask, /* sign bits from c[i][0..15] + cox */
                  &partmask); /* sign bits from c[i][0..15] + cio */

   /* All 16 sub-blocks rejected by at least one plane: nothing to do. */
   if (outmask == 0xffff)

   /* Mask of sub-blocks which are inside all trivial accept planes:
    */
   inmask = ~partmask & 0xffff;

   /* Mask of sub-blocks which are inside all trivial reject planes,
    * but outside at least one trivial accept plane:
    */
   partial_mask = partmask & ~outmask;

   assert((partial_mask & inmask) == 0);

   LP_COUNT_ADD(nr_empty_4, util_bitcount(0xffff & ~(partial_mask | inmask)));

   /* Iterate over partials:
    */
   while (partial_mask) {
      /* i indexes the 4x4 grid of sub-blocks; ix/iy are pixel offsets. */
      int i = ffs(partial_mask) - 1;
      int ix = (i & 3) * 4;
      int iy = (i >> 2) * 4;

      partial_mask &= ~(1 << i);

      LP_COUNT(nr_partially_covered_4);

      /* Re-evaluate each plane at the sub-block origin (expression is
       * partially elided in this view).
       */
      for (j = 0; j < NR_PLANES; j++)
                   + plane[j].dcdy * iy);

      TAG(do_block_4)(task, tri, plane, px, py, cx);

   /* Iterate over fulls:
    */
      int i = ffs(inmask) - 1;
      int ix = (i & 3) * 4;
      int iy = (i >> 2) * 4;

      LP_COUNT(nr_fully_covered_4);
      block_full_4(task, tri, px, py);
151 * Scan the tile in chunks and figure out which pixels to rasterize
/**
 * Rasterizer command handler: scan this task's tile in 16x16 chunks,
 * classifying each chunk against the triangle's active edge planes and
 * dispatching full chunks to block_full_16() and partial chunks to
 * TAG(do_block_16)().
 *
 * NOTE(review): loop headers, early returns and braces are elided in
 * this extraction; only the visible lines are annotated.
 */
TAG(lp_rast_triangle)(struct lp_rasterizer_task *task,
                      const union lp_rast_cmd_arg arg)
   const struct lp_rast_triangle *tri = arg.triangle.tri;
   unsigned plane_mask = arg.triangle.plane_mask;
   const struct lp_rast_plane *tri_plane = GET_PLANES(tri);
   const int x = task->x, y = task->y;
   /* Local copy of only the planes selected by plane_mask. */
   struct lp_rast_plane plane[NR_PLANES];
   unsigned outmask, inmask, partmask, partial_mask;

   if (tri->inputs.disable) {
      /* This triangle was partially binned and has been disabled */

   outmask = 0;                /* outside one or more trivial reject planes */
   partmask = 0;               /* outside one or more trivial accept planes */

      /* Gather the active planes and evaluate each edge equation at the
       * tile origin (enclosing loop header elided in this view).
       */
      int i = ffs(plane_mask) - 1;
      plane[j] = tri_plane[i];
      plane_mask &= ~(1 << i);
      c[j] = plane[j].c + plane[j].dcdy * y - plane[j].dcdx * x;

      /* Steps scaled by 16 to move one 16x16 chunk at a time. */
      const int dcdx = -plane[j].dcdx * 16;
      const int dcdy = plane[j].dcdy * 16;
      const int cox = plane[j].eo * 16;
      const int ei = plane[j].dcdy - plane[j].dcdx - plane[j].eo;
      const int cio = ei * 16 - 1;

      build_masks(c[j] + cox,
                  &outmask, /* sign bits from c[i][0..15] + cox */
                  &partmask); /* sign bits from c[i][0..15] + cio */

   /* Whole tile rejected by at least one plane: nothing to do. */
   if (outmask == 0xffff)

   /* Mask of sub-blocks which are inside all trivial accept planes:
    */
   inmask = ~partmask & 0xffff;

   /* Mask of sub-blocks which are inside all trivial reject planes,
    * but outside at least one trivial accept plane:
    */
   partial_mask = partmask & ~outmask;

   assert((partial_mask & inmask) == 0);

   LP_COUNT_ADD(nr_empty_16, util_bitcount(0xffff & ~(partial_mask | inmask)));

   /* Iterate over partials:
    */
   while (partial_mask) {
      int i = ffs(partial_mask) - 1;
      int ix = (i & 3) * 16;
      int iy = (i >> 2) * 16;

      /* Re-evaluate each plane at the chunk origin (expression is
       * partially elided in this view).
       */
      for (j = 0; j < NR_PLANES; j++)
                   + plane[j].dcdy * iy);

      partial_mask &= ~(1 << i);

      LP_COUNT(nr_partially_covered_16);
      TAG(do_block_16)(task, tri, plane, px, py, cx);

   /* Iterate over fulls:
    */
      int i = ffs(inmask) - 1;
      int ix = (i & 3) * 16;
      int iy = (i >> 2) * 16;

      LP_COUNT(nr_fully_covered_16);
      block_full_16(task, tri, px, py);
251 #if defined(PIPE_ARCH_SSE) && defined(TRI_16)
252 /* XXX: special case this when intersection is not required.
253 * - tile completely within bbox,
254 * - bbox completely within tile.
/**
 * SSE fast path: rasterize a triangle known to fit within a 16x16
 * region.  Precomputes per-plane SSE step tables, trivially rejects
 * 4x4 sub-blocks, then computes exact coverage masks for the rest and
 * shades them.
 *
 * NOTE(review): several source lines are elided in this extraction;
 * only the visible lines are annotated.
 */
TRI_16(struct lp_rasterizer_task *task,
       const union lp_rast_cmd_arg arg)
   const struct lp_rast_triangle *tri = arg.triangle.tri;
   const struct lp_rast_plane *plane = GET_PLANES(tri);
   unsigned mask = arg.triangle.plane_mask;
   unsigned outmask, partial_mask;
   /* Per-plane edge-equation offsets for the 4x4 pixel lattice
    * (one __m128i row per scanline of the 4x4 block).
    */
   __m128i cstep4[NR_PLANES][4];
   /* NOTE(review): low byte of plane_mask appears to carry a packed x
    * offset here — confirm against the binner (task->x addition is not
    * visible in this view).
    */
   int x = (mask & 0xff);

   outmask = 0;                /* outside one or more trivial reject planes */

   for (j = 0; j < NR_PLANES; j++) {
      const int dcdx = -plane[j].dcdx * 4;
      const int dcdy = plane[j].dcdy * 4;
      __m128i xdcdy = _mm_set1_epi32(dcdy);

      /* Build the 4x4 lattice of c offsets: row 0 steps by dcdx along
       * x; each subsequent row adds one dcdy step.
       */
      cstep4[j][0] = _mm_setr_epi32(0, dcdx, dcdx*2, dcdx*3);
      cstep4[j][1] = _mm_add_epi32(cstep4[j][0], xdcdy);
      cstep4[j][2] = _mm_add_epi32(cstep4[j][1], xdcdy);
      cstep4[j][3] = _mm_add_epi32(cstep4[j][2], xdcdy);

      /* Edge equation at the block origin, plus trivial-reject corner
       * offset scaled to 4x4 sub-blocks.
       */
      const int c = plane[j].c + plane[j].dcdy * y - plane[j].dcdx * x;
      const int cox = plane[j].eo * 4;

      outmask |= sign_bits4(cstep4[j], c + cox);

   /* All 16 sub-blocks rejected: nothing to rasterize. */
   if (outmask == 0xffff)

   /* Mask of sub-blocks which are inside all trivial reject planes,
    * but outside at least one trivial accept plane:
    */
   partial_mask = 0xffff & ~outmask;

   /* Iterate over partials:
    */
   while (partial_mask) {
      int i = ffs(partial_mask) - 1;
      int ix = (i & 3) * 4;
      int iy = (i >> 2) * 4;
      /* Coverage mask for this 4x4 sub-block (shadows the outer
       * plane_mask variable of the same name).
       */
      unsigned mask = 0xffff;

      partial_mask &= ~(1 << i);

      /* Exact per-pixel test against every plane. */
      for (j = 0; j < NR_PLANES; j++) {
         const int cx = (plane[j].c - 1
                         + plane[j].dcdy * py) * 4;

         mask &= ~sign_bits4(cstep4[j], cx);

      lp_rast_shade_quads_mask(task, &tri->inputs, px, py, mask);
328 #if defined(PIPE_ARCH_SSE) && defined(TRI_4)
/**
 * SSE fast path: rasterize a triangle known to fit within a single
 * 4x4 block.  Evaluates all NR_PLANES edge equations across the 4x4
 * lattice with SSE, packs the sign bits into a 16-bit coverage mask
 * and shades the surviving quads.
 *
 * NOTE(review): several source lines are elided in this extraction;
 * only the visible lines are annotated.
 */
TRI_4(struct lp_rasterizer_task *task,
      const union lp_rast_cmd_arg arg)
   const struct lp_rast_triangle *tri = arg.triangle.tri;
   const struct lp_rast_plane *plane = GET_PLANES(tri);
   unsigned mask = arg.triangle.plane_mask;
   /* Block position packed into plane_mask: low byte = x offset,
    * high byte = y offset within the tile.
    */
   const int x = task->x + (mask & 0xff);
   const int y = task->y + (mask >> 8);

   /* Iterate over partials:
    */
      /* Coverage mask for the 4x4 block (shadows the packed-coordinate
       * variable of the same name above).
       */
      unsigned mask = 0xffff;

      for (j = 0; j < NR_PLANES; j++) {
         /* Edge equation at the block origin (expression partially
          * elided in this view).
          */
         const int cx = (plane[j].c
                         + plane[j].dcdy * y);

         const int dcdx = -plane[j].dcdx;
         const int dcdy = plane[j].dcdy;
         __m128i xdcdy = _mm_set1_epi32(dcdy);

         /* One __m128i row of c values per scanline of the block. */
         __m128i cstep0 = _mm_setr_epi32(cx, cx + dcdx, cx + dcdx*2, cx + dcdx*3);
         __m128i cstep1 = _mm_add_epi32(cstep0, xdcdy);
         __m128i cstep2 = _mm_add_epi32(cstep1, xdcdy);
         __m128i cstep3 = _mm_add_epi32(cstep2, xdcdy);

         /* Saturating packs 32->16->8 bit preserve the sign bit of each
          * lane, so movemask yields one in/out bit per pixel.
          */
         __m128i cstep01 = _mm_packs_epi32(cstep0, cstep1);
         __m128i cstep23 = _mm_packs_epi32(cstep2, cstep3);
         __m128i result = _mm_packs_epi16(cstep01, cstep23);

         /* Extract the sign bits
          */
         mask &= ~_mm_movemask_epi8(result);

      lp_rast_shade_quads_mask(task, &tri->inputs, x, y, mask);