/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
30 #include "evergreend.h"
31 #include "evergreen_reg_safe.h"
32 #include "cayman_reg_safe.h"
34 #define MAX(a,b) (((a)>(b))?(a):(b))
35 #define MIN(a,b) (((a)<(b))?(a):(b))
37 int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
38 struct radeon_cs_reloc **cs_reloc);
struct evergreen_cs_track {
	u32 nsamples; /* unused */
	struct radeon_bo *cb_color_bo[12];
	u32 cb_color_bo_offset[12];
	struct radeon_bo *cb_color_fmask_bo[8]; /* unused */
	struct radeon_bo *cb_color_cmask_bo[8]; /* unused */
	u32 cb_color_info[12];
	u32 cb_color_view[12];
	u32 cb_color_pitch[12];
	u32 cb_color_slice[12];
	u32 cb_color_slice_idx[12];
	u32 cb_color_attrib[12];
	u32 cb_color_cmask_slice[8]; /* unused */
	u32 cb_color_fmask_slice[8]; /* unused */
	u32 cb_shader_mask; /* unused */
	u32 vgt_strmout_config;
	u32 vgt_strmout_buffer_config;
	struct radeon_bo *vgt_strmout_bo[4];
	u32 vgt_strmout_bo_offset[4];
	u32 vgt_strmout_size[4];
	u32 db_z_write_offset;
	struct radeon_bo *db_z_read_bo;
	struct radeon_bo *db_z_write_bo;
	u32 db_s_write_offset;
	struct radeon_bo *db_s_read_bo;
	struct radeon_bo *db_s_write_bo;
	bool sx_misc_kill_all_prims;
	struct radeon_bo *htile_bo;
};
static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
{
	if (tiling_flags & RADEON_TILING_MACRO)
		return ARRAY_2D_TILED_THIN1;
	else if (tiling_flags & RADEON_TILING_MICRO)
		return ARRAY_1D_TILED_THIN1;
	else
		return ARRAY_LINEAR_GENERAL;
}
static u32 evergreen_cs_get_num_banks(u32 nbanks)
{
	switch (nbanks) {
	case 2:
		return ADDR_SURF_2_BANK;
	case 4:
		return ADDR_SURF_4_BANK;
	case 8:
		return ADDR_SURF_8_BANK;
	case 16:
	default:
		return ADDR_SURF_16_BANK;
	}
}
static void evergreen_cs_track_init(struct evergreen_cs_track *track)
{
	int i;

	for (i = 0; i < 8; i++) {
		track->cb_color_fmask_bo[i] = NULL;
		track->cb_color_cmask_bo[i] = NULL;
		track->cb_color_cmask_slice[i] = 0;
		track->cb_color_fmask_slice[i] = 0;
	}

	for (i = 0; i < 12; i++) {
		track->cb_color_bo[i] = NULL;
		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
		track->cb_color_info[i] = 0;
		track->cb_color_view[i] = 0xFFFFFFFF;
		track->cb_color_pitch[i] = 0;
		track->cb_color_slice[i] = 0xfffffff;
		track->cb_color_slice_idx[i] = 0;
	}
	track->cb_target_mask = 0xFFFFFFFF;
	track->cb_shader_mask = 0xFFFFFFFF;
	track->cb_dirty = true;

	track->db_depth_slice = 0xffffffff;
	track->db_depth_view = 0xFFFFC000;
	track->db_depth_size = 0xFFFFFFFF;
	track->db_depth_control = 0xFFFFFFFF;
	track->db_z_info = 0xFFFFFFFF;
	track->db_z_read_offset = 0xFFFFFFFF;
	track->db_z_write_offset = 0xFFFFFFFF;
	track->db_z_read_bo = NULL;
	track->db_z_write_bo = NULL;
	track->db_s_info = 0xFFFFFFFF;
	track->db_s_read_offset = 0xFFFFFFFF;
	track->db_s_write_offset = 0xFFFFFFFF;
	track->db_s_read_bo = NULL;
	track->db_s_write_bo = NULL;
	track->db_dirty = true;
	track->htile_bo = NULL;
	track->htile_offset = 0xFFFFFFFF;
	track->htile_surface = 0;

	for (i = 0; i < 4; i++) {
		track->vgt_strmout_size[i] = 0;
		track->vgt_strmout_bo[i] = NULL;
		track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
	}
	track->streamout_dirty = true;
	track->sx_misc_kill_all_prims = false;
}
	/* values gathered from the cs */
	unsigned long base_align;
static int evergreen_surface_check_linear(struct radeon_cs_parser *p,
					  struct eg_surface *surf,
					  const char *prefix)
{
	surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
	surf->base_align = surf->bpe;
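	/*
	 * Worked example (illustrative values, not from the cs): a linear
	 * surface with nbx = 64 and nby = 64 blocks, bpe = 4 and
	 * nsamples = 1 gives layer_size = 64 * 64 * 4 * 1 = 16384 bytes,
	 * and the base address only has to be bpe (4 byte) aligned.
	 */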
static int evergreen_surface_check_linear_aligned(struct radeon_cs_parser *p,
						  struct eg_surface *surf,
						  const char *prefix)
{
	struct evergreen_cs_track *track = p->track;
	unsigned palign;

	palign = MAX(64, track->group_size / surf->bpe);
	surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
	surf->base_align = track->group_size;
	surf->palign = palign;
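	/*
	 * Worked example (illustrative values): with group_size = 256 and
	 * bpe = 4, palign = MAX(64, 256 / 4) = 64 blocks, and the base
	 * address must sit on a 256 byte (group_size) boundary.
	 */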
208 if (surf->nbx & (palign - 1)) {
210 dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
211 __func__, __LINE__, prefix, surf->nbx, palign);
static int evergreen_surface_check_1d(struct radeon_cs_parser *p,
				      struct eg_surface *surf,
				      const char *prefix)
{
	struct evergreen_cs_track *track = p->track;
	unsigned palign;

	palign = track->group_size / (8 * surf->bpe * surf->nsamples);
	palign = MAX(8, palign);
	surf->layer_size = surf->nbx * surf->nby * surf->bpe;
	surf->base_align = track->group_size;
	surf->palign = palign;
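	/*
	 * Worked example (illustrative values): with group_size = 256,
	 * bpe = 4 and nsamples = 1, palign = MAX(8, 256 / (8 * 4 * 1)) = 8,
	 * so a 1D tiled pitch must be a multiple of 8 blocks and the height
	 * a multiple of 8 rows (one tile), which the two checks below
	 * enforce.
	 */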
231 if ((surf->nbx & (palign - 1))) {
233 dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d (%d %d %d)\n",
234 __func__, __LINE__, prefix, surf->nbx, palign,
235 track->group_size, surf->bpe, surf->nsamples);
239 if ((surf->nby & (8 - 1))) {
241 dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with 8\n",
242 __func__, __LINE__, prefix, surf->nby);
static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
				      struct eg_surface *surf,
				      const char *prefix)
{
	struct evergreen_cs_track *track = p->track;
	unsigned palign, halign, tileb, slice_pt;
	unsigned mtile_pr, mtile_ps, mtileb;

	tileb = 64 * surf->bpe * surf->nsamples;
	slice_pt = 1;
	if (tileb > surf->tsplit) {
		slice_pt = tileb / surf->tsplit;
	}
	tileb = tileb / slice_pt;
	/* macro tile width & height */
	palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
	halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
	mtileb = (palign / 8) * (halign / 8) * tileb;
	mtile_pr = surf->nbx / palign;
	mtile_ps = (mtile_pr * surf->nby) / halign;
	surf->layer_size = mtile_ps * mtileb * slice_pt;
	surf->base_align = (palign / 8) * (halign / 8) * tileb;
	surf->palign = palign;
	surf->halign = halign;
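	/*
	 * Worked example (illustrative values): npipes = 4, nbanks = 8,
	 * bankw = bankh = mtilea = 1, bpe = 4, nsamples = 1 and
	 * tsplit = 1024 give tileb = 64 * 4 = 256 (no split, slice_pt = 1),
	 * palign = 8 * 1 * 4 * 1 = 32, halign = (8 * 1 * 8) / 1 = 64 and
	 * mtileb = (32 / 8) * (64 / 8) * 256 = 8192 bytes per macro tile.
	 */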
274 if ((surf->nbx & (palign - 1))) {
276 dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
277 __func__, __LINE__, prefix, surf->nbx, palign);
281 if ((surf->nby & (halign - 1))) {
283 dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with %d\n",
284 __func__, __LINE__, prefix, surf->nby, halign);
static int evergreen_surface_check(struct radeon_cs_parser *p,
				   struct eg_surface *surf,
				   const char *prefix)
{
	/* common values computed here */
	surf->bpe = r600_fmt_get_blocksize(surf->format);

	switch (surf->mode) {
	case ARRAY_LINEAR_GENERAL:
		return evergreen_surface_check_linear(p, surf, prefix);
	case ARRAY_LINEAR_ALIGNED:
		return evergreen_surface_check_linear_aligned(p, surf, prefix);
	case ARRAY_1D_TILED_THIN1:
		return evergreen_surface_check_1d(p, surf, prefix);
	case ARRAY_2D_TILED_THIN1:
		return evergreen_surface_check_2d(p, surf, prefix);
	default:
		dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
			 __func__, __LINE__, prefix, surf->mode);
		return -EINVAL;
	}
}
static int evergreen_surface_value_conv_check(struct radeon_cs_parser *p,
					      struct eg_surface *surf,
					      const char *prefix)
{
	switch (surf->mode) {
	case ARRAY_2D_TILED_THIN1:
		break;
	case ARRAY_LINEAR_GENERAL:
	case ARRAY_LINEAR_ALIGNED:
	case ARRAY_1D_TILED_THIN1:
		return 0;
	default:
		dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
			 __func__, __LINE__, prefix, surf->mode);
		return -EINVAL;
	}

	switch (surf->nbanks) {
	case 0: surf->nbanks = 2; break;
	case 1: surf->nbanks = 4; break;
	case 2: surf->nbanks = 8; break;
	case 3: surf->nbanks = 16; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid number of banks %d\n",
			 __func__, __LINE__, prefix, surf->nbanks);
		return -EINVAL;
	}
	switch (surf->bankw) {
	case 0: surf->bankw = 1; break;
	case 1: surf->bankw = 2; break;
	case 2: surf->bankw = 4; break;
	case 3: surf->bankw = 8; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid bankw %d\n",
			 __func__, __LINE__, prefix, surf->bankw);
		return -EINVAL;
	}
	switch (surf->bankh) {
	case 0: surf->bankh = 1; break;
	case 1: surf->bankh = 2; break;
	case 2: surf->bankh = 4; break;
	case 3: surf->bankh = 8; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid bankh %d\n",
			 __func__, __LINE__, prefix, surf->bankh);
		return -EINVAL;
	}
	switch (surf->mtilea) {
	case 0: surf->mtilea = 1; break;
	case 1: surf->mtilea = 2; break;
	case 2: surf->mtilea = 4; break;
	case 3: surf->mtilea = 8; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid macro tile aspect %d\n",
			 __func__, __LINE__, prefix, surf->mtilea);
		return -EINVAL;
	}
	switch (surf->tsplit) {
	case 0: surf->tsplit = 64; break;
	case 1: surf->tsplit = 128; break;
	case 2: surf->tsplit = 256; break;
	case 3: surf->tsplit = 512; break;
	case 4: surf->tsplit = 1024; break;
	case 5: surf->tsplit = 2048; break;
	case 6: surf->tsplit = 4096; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid tile split %d\n",
			 __func__, __LINE__, prefix, surf->tsplit);
		return -EINVAL;
	}

	return 0;
}
static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned id)
{
	struct evergreen_cs_track *track = p->track;
	struct eg_surface surf;
	unsigned pitch, slice, mslice;
	unsigned long offset;
	int r;

	mslice = G_028C6C_SLICE_MAX(track->cb_color_view[id]) + 1;
	pitch = track->cb_color_pitch[id];
	slice = track->cb_color_slice[id];
	surf.nbx = (pitch + 1) * 8;
	surf.nby = ((slice + 1) * 64) / surf.nbx;
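	/*
	 * Example of the encoding (illustrative values): PITCH_TILE_MAX is
	 * in units of 8 pixels and SLICE_TILE_MAX in units of 64 pixels, so
	 * a 1024x768 color buffer is programmed as pitch = 127 and
	 * slice = (1024 * 768) / 64 - 1 = 12287, which decodes back to
	 * nbx = (127 + 1) * 8 = 1024 and nby = (12288 * 64) / 1024 = 768.
	 */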
	surf.mode = G_028C70_ARRAY_MODE(track->cb_color_info[id]);
	surf.format = G_028C70_FORMAT(track->cb_color_info[id]);
	surf.tsplit = G_028C74_TILE_SPLIT(track->cb_color_attrib[id]);
	surf.nbanks = G_028C74_NUM_BANKS(track->cb_color_attrib[id]);
	surf.bankw = G_028C74_BANK_WIDTH(track->cb_color_attrib[id]);
	surf.bankh = G_028C74_BANK_HEIGHT(track->cb_color_attrib[id]);
	surf.mtilea = G_028C74_MACRO_TILE_ASPECT(track->cb_color_attrib[id]);
	if (!r600_fmt_is_valid_color(surf.format)) {
		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08x)\n",
			 __func__, __LINE__, surf.format,
			 id, track->cb_color_info[id]);
		return -EINVAL;
	}

	r = evergreen_surface_value_conv_check(p, &surf, "cb");
	if (r) {
		return r;
	}

	r = evergreen_surface_check(p, &surf, "cb");
	if (r) {
		dev_warn(p->dev, "%s:%d cb[%d] invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
			 __func__, __LINE__, id, track->cb_color_pitch[id],
			 track->cb_color_slice[id], track->cb_color_attrib[id],
			 track->cb_color_info[id]);
		return r;
	}

	offset = track->cb_color_bo_offset[id] << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, id, offset, surf.base_align);
		return -EINVAL;
	}

	offset += surf.layer_size * mslice;
	if (offset > radeon_bo_size(track->cb_color_bo[id])) {
		/* old ddx are broken: they allocate the bo with w*h*bpp but
		 * program the slice with ALIGN(h, 8); catch this and patch
		 * the command stream.
		 */
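		/*
		 * Illustration (assumed numbers): for a 1024x601 buffer the
		 * ddx allocates 1024 * 601 * 4 bytes but programs slice as
		 * if the height were ALIGN(601, 8) = 608. The loop below
		 * walks nby down from the programmed height until the
		 * surface fits in the bo, then rewrites the slice dword in
		 * place in the IB.
		 */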
		volatile u32 *ib = p->ib.ptr;
		unsigned long tmp, nby, bsize, size, min = 0;

		/* find the height the ddx wants */
		if (surf.nby > 8) {
			min = surf.nby - 8;
		}
		bsize = radeon_bo_size(track->cb_color_bo[id]);
		tmp = track->cb_color_bo_offset[id] << 8;
		for (nby = surf.nby; nby > min; nby--) {
			size = nby * surf.nbx * surf.bpe * surf.nsamples;
			if ((tmp + size * mslice) <= bsize) {
				break;
			}
		}
		if (nby > min) {
			surf.nby = nby;
			slice = ((nby * surf.nbx) / 64) - 1;
			if (!evergreen_surface_check(p, &surf, "cb")) {
				/* check if this one works */
				tmp += surf.layer_size * mslice;
				if (tmp <= bsize) {
					ib[track->cb_color_slice_idx[id]] = slice;
474 dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
475 "offset %d, max layer %d, bo size %ld, slice %d)\n",
476 __func__, __LINE__, id, surf.layer_size,
477 track->cb_color_bo_offset[id] << 8, mslice,
478 radeon_bo_size(track->cb_color_bo[id]), slice);
479 dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
480 __func__, __LINE__, surf.nbx, surf.nby,
481 surf.mode, surf.bpe, surf.nsamples,
482 surf.bankw, surf.bankh,
483 surf.tsplit, surf.mtilea);
static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
					     unsigned nbx, unsigned nby)
{
	struct evergreen_cs_track *track = p->track;
	unsigned long size;

	if (track->htile_bo == NULL) {
		dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
			 __func__, __LINE__, track->db_z_info);
		return -EINVAL;
	}

	if (G_028ABC_LINEAR(track->htile_surface)) {
		/* pitch must be 16 htiles aligned == 16 * 8 pixels aligned */
		nbx = round_up(nbx, 16 * 8);
		/* height is npipes htiles aligned == npipes * 8 pixels aligned */
		nby = round_up(nby, track->npipes * 8);
	} else {
		/* always assume 8x8 htile */
		/* alignment is htile align * 8; the htile alignment varies
		 * with the number of pipes, the tile width and nby
		 */
		switch (track->npipes) {
		case 8:
			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
			nbx = round_up(nbx, 64 * 8);
			nby = round_up(nby, 64 * 8);
			break;
		case 4:
			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
			nbx = round_up(nbx, 64 * 8);
			nby = round_up(nby, 32 * 8);
			break;
		case 2:
			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
			nbx = round_up(nbx, 32 * 8);
			nby = round_up(nby, 32 * 8);
			break;
		case 1:
			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
			nbx = round_up(nbx, 32 * 8);
			nby = round_up(nby, 16 * 8);
			break;
		default:
			dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
				 __func__, __LINE__, track->npipes);
			return -EINVAL;
		}
	}
	/* compute number of htiles */
	nbx = nbx >> 3;
	nby = nby >> 3;
	/* size must be aligned on npipes * 2K boundary */
	size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
	size += track->htile_offset;
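	/*
	 * Worked example (illustrative values, using the 8x8 htile
	 * reduction above): npipes = 2 and a 1024x768 depth buffer give
	 * 128 * 96 htiles * 4 bytes = 49152 bytes, already a multiple of
	 * the 2 * 2K pipe boundary, so size stays 49152 plus htile_offset.
	 */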
	if (size > radeon_bo_size(track->htile_bo)) {
		dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
			 __func__, __LINE__, radeon_bo_size(track->htile_bo),
			 size, nbx, nby);
		return -EINVAL;
	}
	return 0;
}
static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
{
	struct evergreen_cs_track *track = p->track;
	struct eg_surface surf;
	unsigned pitch, slice, mslice;
	unsigned long offset;
	int r;

	mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
	pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
	slice = track->db_depth_slice;
	surf.nbx = (pitch + 1) * 8;
	surf.nby = ((slice + 1) * 64) / surf.nbx;
	surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
	surf.format = G_028044_FORMAT(track->db_s_info);
	surf.tsplit = G_028044_TILE_SPLIT(track->db_s_info);
	surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
	surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
	surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
	surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
	if (surf.format != 1) {
		dev_warn(p->dev, "%s:%d stencil invalid format %d\n",
			 __func__, __LINE__, surf.format);
		return -EINVAL;
	}
	/* replace by color format so we can use same code */
	surf.format = V_028C70_COLOR_8;

	r = evergreen_surface_value_conv_check(p, &surf, "stencil");
	if (r) {
		return r;
	}
591 r = evergreen_surface_check(p, &surf, NULL);
593 /* old userspace doesn't compute proper depth/stencil alignment
594 * check that alignment against a bigger byte per elements and
595 * only report if that alignment is wrong too.
597 surf.format = V_028C70_COLOR_8_8_8_8;
598 r = evergreen_surface_check(p, &surf, "stencil");
600 dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
601 __func__, __LINE__, track->db_depth_size,
602 track->db_depth_slice, track->db_s_info, track->db_z_info);
	offset = track->db_s_read_offset << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, offset, surf.base_align);
		return -EINVAL;
	}
	offset += surf.layer_size * mslice;
	if (offset > radeon_bo_size(track->db_s_read_bo)) {
		dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, "
			 "offset %ld, max layer %d, bo size %ld)\n",
			 __func__, __LINE__, surf.layer_size,
			 (unsigned long)track->db_s_read_offset << 8, mslice,
			 radeon_bo_size(track->db_s_read_bo));
		dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
			 __func__, __LINE__, track->db_depth_size,
			 track->db_depth_slice, track->db_s_info, track->db_z_info);
		return -EINVAL;
	}

	offset = track->db_s_write_offset << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, offset, surf.base_align);
		return -EINVAL;
	}
	offset += surf.layer_size * mslice;
	if (offset > radeon_bo_size(track->db_s_write_bo)) {
		dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, "
			 "offset %ld, max layer %d, bo size %ld)\n",
			 __func__, __LINE__, surf.layer_size,
			 (unsigned long)track->db_s_write_offset << 8, mslice,
			 radeon_bo_size(track->db_s_write_bo));
		return -EINVAL;
	}
	if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
		r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
		if (r) {
			return r;
		}
	}

	return 0;
}
static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
{
	struct evergreen_cs_track *track = p->track;
	struct eg_surface surf;
	unsigned pitch, slice, mslice;
	unsigned long offset;
	int r;

	mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
	pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
	slice = track->db_depth_slice;
	surf.nbx = (pitch + 1) * 8;
	surf.nby = ((slice + 1) * 64) / surf.nbx;
	surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
	surf.format = G_028040_FORMAT(track->db_z_info);
	surf.tsplit = G_028040_TILE_SPLIT(track->db_z_info);
	surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
	surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
	surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
	surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
	switch (surf.format) {
	case V_028040_Z_16:
		surf.format = V_028C70_COLOR_16;
		break;
	case V_028040_Z_24:
	case V_028040_Z_32_FLOAT:
		surf.format = V_028C70_COLOR_8_8_8_8;
		break;
	default:
		dev_warn(p->dev, "%s:%d depth invalid format %d\n",
			 __func__, __LINE__, surf.format);
		return -EINVAL;
	}
	r = evergreen_surface_value_conv_check(p, &surf, "depth");
	if (r) {
		dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
			 __func__, __LINE__, track->db_depth_size,
			 track->db_depth_slice, track->db_z_info);
		return r;
	}

	r = evergreen_surface_check(p, &surf, "depth");
	if (r) {
		dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
			 __func__, __LINE__, track->db_depth_size,
			 track->db_depth_slice, track->db_z_info);
		return r;
	}
	offset = track->db_z_read_offset << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d depth read bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, offset, surf.base_align);
		return -EINVAL;
	}
	offset += surf.layer_size * mslice;
	if (offset > radeon_bo_size(track->db_z_read_bo)) {
		dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, "
			 "offset %ld, max layer %d, bo size %ld)\n",
			 __func__, __LINE__, surf.layer_size,
			 (unsigned long)track->db_z_read_offset << 8, mslice,
			 radeon_bo_size(track->db_z_read_bo));
		return -EINVAL;
	}

	offset = track->db_z_write_offset << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d depth write bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, offset, surf.base_align);
		return -EINVAL;
	}
	offset += surf.layer_size * mslice;
	if (offset > radeon_bo_size(track->db_z_write_bo)) {
		dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, "
			 "offset %ld, max layer %d, bo size %ld)\n",
			 __func__, __LINE__, surf.layer_size,
			 (unsigned long)track->db_z_write_offset << 8, mslice,
			 radeon_bo_size(track->db_z_write_bo));
		return -EINVAL;
	}
	if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
		r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
		if (r) {
			return r;
		}
	}

	return 0;
}
static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
					       struct radeon_bo *texture,
					       struct radeon_bo *mipmap,
					       unsigned idx)
{
	struct eg_surface surf;
	unsigned long toffset, moffset;
	unsigned dim, llevel, mslice, width, height, depth, i;
	u32 texdw[8];
	int r;

	texdw[0] = radeon_get_ib_value(p, idx + 0);
	texdw[1] = radeon_get_ib_value(p, idx + 1);
	texdw[2] = radeon_get_ib_value(p, idx + 2);
	texdw[3] = radeon_get_ib_value(p, idx + 3);
	texdw[4] = radeon_get_ib_value(p, idx + 4);
	texdw[5] = radeon_get_ib_value(p, idx + 5);
	texdw[6] = radeon_get_ib_value(p, idx + 6);
	texdw[7] = radeon_get_ib_value(p, idx + 7);
	dim = G_030000_DIM(texdw[0]);
	llevel = G_030014_LAST_LEVEL(texdw[5]);
	mslice = G_030014_LAST_ARRAY(texdw[5]) + 1;
	width = G_030000_TEX_WIDTH(texdw[0]) + 1;
	height = G_030004_TEX_HEIGHT(texdw[1]) + 1;
	depth = G_030004_TEX_DEPTH(texdw[1]) + 1;
	surf.format = G_03001C_DATA_FORMAT(texdw[7]);
	surf.nbx = (G_030000_PITCH(texdw[0]) + 1) * 8;
	surf.nbx = r600_fmt_get_nblocksx(surf.format, surf.nbx);
	surf.nby = r600_fmt_get_nblocksy(surf.format, height);
	surf.mode = G_030004_ARRAY_MODE(texdw[1]);
	surf.tsplit = G_030018_TILE_SPLIT(texdw[6]);
	surf.nbanks = G_03001C_NUM_BANKS(texdw[7]);
	surf.bankw = G_03001C_BANK_WIDTH(texdw[7]);
	surf.bankh = G_03001C_BANK_HEIGHT(texdw[7]);
	surf.mtilea = G_03001C_MACRO_TILE_ASPECT(texdw[7]);

	toffset = texdw[2] << 8;
	moffset = texdw[3] << 8;
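	/*
	 * Note: the BASE_ADDRESS dwords hold a 256-byte-aligned address
	 * shifted right by 8, so e.g. a stored value of 0x10 decodes to a
	 * byte offset of 0x1000 after the << 8 above.
	 */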
	if (!r600_fmt_is_valid_texture(surf.format, p->family)) {
		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
			 __func__, __LINE__, surf.format);
		return -EINVAL;
	}
	switch (dim) {
	case V_030000_SQ_TEX_DIM_1D:
	case V_030000_SQ_TEX_DIM_2D:
	case V_030000_SQ_TEX_DIM_CUBEMAP:
	case V_030000_SQ_TEX_DIM_1D_ARRAY:
	case V_030000_SQ_TEX_DIM_2D_ARRAY:
		depth = 1;
		break;
	case V_030000_SQ_TEX_DIM_2D_MSAA:
	case V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA:
		surf.nsamples = 1 << llevel;
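		/*
		 * For MSAA resources LAST_LEVEL encodes log2(samples) rather
		 * than a mip count, e.g. llevel = 2 means 4 samples; llevel
		 * is then treated as 0 for the mipmap checks below.
		 */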
		llevel = 0;
		depth = 1;
		break;
	case V_030000_SQ_TEX_DIM_3D:
		break;
	default:
		dev_warn(p->dev, "%s:%d texture invalid dimension %d\n",
			 __func__, __LINE__, dim);
		return -EINVAL;
	}
	r = evergreen_surface_value_conv_check(p, &surf, "texture");
	if (r) {
		return r;
	}

	evergreen_surface_check(p, &surf, NULL);
	surf.nby = ALIGN(surf.nby, surf.halign);

	r = evergreen_surface_check(p, &surf, "texture");
	if (r) {
		dev_warn(p->dev, "%s:%d texture invalid 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
			 __func__, __LINE__, texdw[0], texdw[1], texdw[4],
			 texdw[5], texdw[6], texdw[7]);
		return r;
	}
	/* check texture size */
	if (toffset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d texture bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, toffset, surf.base_align);
		return -EINVAL;
	}
	if (surf.nsamples <= 1 && moffset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, moffset, surf.base_align);
		return -EINVAL;
	}
	if (dim == SQ_TEX_DIM_3D) {
		toffset += surf.layer_size * depth;
	} else {
		toffset += surf.layer_size * mslice;
	}
	if (toffset > radeon_bo_size(texture)) {
		dev_warn(p->dev, "%s:%d texture bo too small (layer size %d, "
			 "offset %ld, max layer %d, depth %d, bo size %ld) (%d %d)\n",
			 __func__, __LINE__, surf.layer_size,
			 (unsigned long)texdw[2] << 8, mslice,
			 depth, radeon_bo_size(texture),
			 surf.nbx, surf.nby);
		return -EINVAL;
	}

	if (!mipmap) {
		if (llevel) {
			dev_warn(p->dev, "%s:%i got NULL MIP_ADDRESS relocation\n",
				 __func__, __LINE__);
			return -EINVAL;
		} else {
			return 0; /* everything's ok */
		}
	}
	/* check mipmap size */
	for (i = 1; i <= llevel; i++) {
		unsigned w, h, d;

		w = r600_mip_minify(width, i);
		h = r600_mip_minify(height, i);
		d = r600_mip_minify(depth, i);
		surf.nbx = r600_fmt_get_nblocksx(surf.format, w);
		surf.nby = r600_fmt_get_nblocksy(surf.format, h);

		switch (surf.mode) {
		case ARRAY_2D_TILED_THIN1:
			if (surf.nbx < surf.palign || surf.nby < surf.halign) {
				surf.mode = ARRAY_1D_TILED_THIN1;
				/* recompute alignment */
				evergreen_surface_check(p, &surf, NULL);
			}
		case ARRAY_LINEAR_GENERAL:
		case ARRAY_LINEAR_ALIGNED:
		case ARRAY_1D_TILED_THIN1:
			break;
		default:
			dev_warn(p->dev, "%s:%d invalid array mode %d\n",
				 __func__, __LINE__, surf.mode);
			return -EINVAL;
		}

		surf.nbx = ALIGN(surf.nbx, surf.palign);
		surf.nby = ALIGN(surf.nby, surf.halign);

		r = evergreen_surface_check(p, &surf, "mipmap");
		if (r) {
			return r;
		}
		if (dim == SQ_TEX_DIM_3D) {
			moffset += surf.layer_size * d;
		} else {
			moffset += surf.layer_size * mslice;
		}
		if (moffset > radeon_bo_size(mipmap)) {
			dev_warn(p->dev, "%s:%d mipmap [%d] bo too small (layer size %d, "
				 "offset %ld, coffset %ld, max layer %d, depth %d, "
				 "bo size %ld) level0 (%d %d %d)\n",
				 __func__, __LINE__, i, surf.layer_size,
				 (unsigned long)texdw[3] << 8, moffset, mslice,
				 d, radeon_bo_size(mipmap),
				 width, height, depth);
			dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
				 __func__, __LINE__, surf.nbx, surf.nby,
				 surf.mode, surf.bpe, surf.nsamples,
				 surf.bankw, surf.bankh,
				 surf.tsplit, surf.mtilea);
			return -EINVAL;
		}
	}

	return 0;
}
static int evergreen_cs_track_check(struct radeon_cs_parser *p)
{
	struct evergreen_cs_track *track = p->track;
	int r;
	unsigned i, tmp;
	unsigned buffer_mask = 0;

	/* check streamout */
	if (track->streamout_dirty && track->vgt_strmout_config) {
		for (i = 0; i < 4; i++) {
			if (track->vgt_strmout_config & (1 << i)) {
				buffer_mask |= (track->vgt_strmout_buffer_config >> (i * 4)) & 0xf;
			}
		}
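		/*
		 * Worked example (illustrative values): vgt_strmout_config =
		 * 0x1 (stream 0 enabled) with vgt_strmout_buffer_config =
		 * 0x4 (stream 0 writes buffer 2) yields buffer_mask = 0x4,
		 * so only buffer 2 is validated below.
		 */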
		for (i = 0; i < 4; i++) {
			if (buffer_mask & (1 << i)) {
				if (track->vgt_strmout_bo[i]) {
					u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
							(u64)track->vgt_strmout_size[i];
					if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
						DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
							  i, offset,
							  radeon_bo_size(track->vgt_strmout_bo[i]));
						return -EINVAL;
					}
				} else {
					dev_warn(p->dev, "No buffer for streamout %d\n", i);
					return -EINVAL;
				}
			}
		}
		track->streamout_dirty = false;
	}
	if (track->sx_misc_kill_all_prims)
		return 0;

	/* check that we have a cb for each enabled target */
	if (track->cb_dirty) {
		tmp = track->cb_target_mask;
		for (i = 0; i < 8; i++) {
			if ((tmp >> (i * 4)) & 0xF) {
				/* at least one component is enabled */
				if (track->cb_color_bo[i] == NULL) {
					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
						 __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
					return -EINVAL;
				}
				r = evergreen_cs_track_validate_cb(p, i);
				if (r) {
					return r;
				}
			}
		}
		track->cb_dirty = false;
	}
	if (track->db_dirty) {
		/* Check stencil buffer */
		if (G_028044_FORMAT(track->db_s_info) != V_028044_STENCIL_INVALID &&
		    G_028800_STENCIL_ENABLE(track->db_depth_control)) {
			r = evergreen_cs_track_validate_stencil(p);
			if (r)
				return r;
		}
		/* Check depth buffer */
		if (G_028040_FORMAT(track->db_z_info) != V_028040_Z_INVALID &&
		    G_028800_Z_ENABLE(track->db_depth_control)) {
			r = evergreen_cs_track_validate_depth(p);
			if (r)
				return r;
		}
		track->db_dirty = false;
	}

	return 0;
}
/**
 * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet
 * @p: parser structure holding parsing context
 *
 * This is an Evergreen(+)-specific function for parsing VLINE packets.
 * The real work is done by the r600_cs_common_vline_parse() function.
 * Here we just set up the ASIC-specific register tables and call
 * the common implementation.
 */
static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	static uint32_t vline_start_end[6] = {
		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC0_REGISTER_OFFSET,
		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC1_REGISTER_OFFSET,
		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC2_REGISTER_OFFSET,
		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC3_REGISTER_OFFSET,
		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC4_REGISTER_OFFSET,
		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC5_REGISTER_OFFSET
	};
	static uint32_t vline_status[6] = {
		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET,
		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET,
		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET,
		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET,
		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET,
		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET
	};

	return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
}
static int evergreen_packet0_check(struct radeon_cs_parser *p,
				   struct radeon_cs_packet *pkt,
				   unsigned idx, unsigned reg)
{
	int r;

	switch (reg) {
	case EVERGREEN_VLINE_START_END:
		r = evergreen_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			return r;
		}
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}
static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
				      struct radeon_cs_packet *pkt)
{
	unsigned reg, i;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = evergreen_packet0_check(p, pkt, idx, reg);
		if (r) {
			return r;
		}
	}
	return 0;
}
/**
 * evergreen_cs_check_reg() - check if register is authorized or not
 * @p: parser structure holding parsing context
 * @reg: register we are testing
 * @idx: index into the cs buffer
 *
 * This function will test against evergreen_reg_safe_bm and return 0
 * if the register is safe. If the register is not flagged as safe this
 * function will test it against a list of registers needing special
 * handling.
 */
static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
	struct radeon_cs_reloc *reloc;
	u32 last_reg;
	u32 m, i, tmp, *ib;
	int r;

	if (p->rdev->family >= CHIP_CAYMAN)
		last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
	else
		last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);

	i = (reg >> 7);
	if (i >= last_reg) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	m = 1 << ((reg >> 2) & 31);
	if (p->rdev->family >= CHIP_CAYMAN) {
		if (!(cayman_reg_safe_bm[i] & m))
			return 0;
	} else {
		if (!(evergreen_reg_safe_bm[i] & m))
			return 0;
	}
	ib = p->ib.ptr;
	switch (reg) {
	/* force the following regs to 0 in an attempt to disable the out
	 * buffer; we would need to understand better how it works to
	 * perform a proper security check on it (Jerome)
	 */
	case SQ_ESGS_RING_SIZE:
	case SQ_GSVS_RING_SIZE:
	case SQ_ESTMP_RING_SIZE:
	case SQ_GSTMP_RING_SIZE:
	case SQ_HSTMP_RING_SIZE:
	case SQ_LSTMP_RING_SIZE:
	case SQ_PSTMP_RING_SIZE:
	case SQ_VSTMP_RING_SIZE:
	case SQ_ESGS_RING_ITEMSIZE:
	case SQ_ESTMP_RING_ITEMSIZE:
	case SQ_GSTMP_RING_ITEMSIZE:
	case SQ_GSVS_RING_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE_1:
	case SQ_GS_VERT_ITEMSIZE_2:
	case SQ_GS_VERT_ITEMSIZE_3:
	case SQ_GSVS_RING_OFFSET_1:
	case SQ_GSVS_RING_OFFSET_2:
	case SQ_GSVS_RING_OFFSET_3:
	case SQ_HSTMP_RING_ITEMSIZE:
	case SQ_LSTMP_RING_ITEMSIZE:
	case SQ_PSTMP_RING_ITEMSIZE:
	case SQ_VSTMP_RING_ITEMSIZE:
	case VGT_TF_RING_SIZE:
		/* get value to populate the IB don't remove */
		/*tmp =radeon_get_ib_value(p, idx);
		ib[idx] = 0;*/
		break;
	case SQ_ESGS_RING_BASE:
	case SQ_GSVS_RING_BASE:
	case SQ_ESTMP_RING_BASE:
	case SQ_GSTMP_RING_BASE:
	case SQ_HSTMP_RING_BASE:
	case SQ_LSTMP_RING_BASE:
	case SQ_PSTMP_RING_BASE:
	case SQ_VSTMP_RING_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case DB_DEPTH_CONTROL:
		track->db_depth_control = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case CAYMAN_DB_EQAA:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		break;
	case CAYMAN_DB_DEPTH_INFO:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		break;
	case DB_Z_INFO:
		track->db_z_info = radeon_get_ib_value(p, idx);
1187 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1188 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1190 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1194 ib[idx] &= ~Z_ARRAY_MODE(0xf);
1195 track->db_z_info &= ~Z_ARRAY_MODE(0xf);
1196 ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
1197 track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
1198 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
1199 unsigned bankw, bankh, mtaspect, tile_split;
1201 evergreen_tiling_fields(reloc->lobj.tiling_flags,
1202 &bankw, &bankh, &mtaspect,
1204 ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
1205 ib[idx] |= DB_TILE_SPLIT(tile_split) |
1206 DB_BANK_WIDTH(bankw) |
1207 DB_BANK_HEIGHT(bankh) |
1208 DB_MACRO_TILE_ASPECT(mtaspect);
1211 track->db_dirty = true;
	case DB_STENCIL_INFO:
		track->db_s_info = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case DB_DEPTH_VIEW:
		track->db_depth_view = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case R_028058_DB_DEPTH_SIZE:
		track->db_depth_size = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case R_02805C_DB_DEPTH_SLICE:
		track->db_depth_slice = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case DB_Z_READ_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_z_read_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_z_read_bo = reloc->robj;
		track->db_dirty = true;
		break;
	case DB_Z_WRITE_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_z_write_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_z_write_bo = reloc->robj;
		track->db_dirty = true;
		break;
	case DB_STENCIL_READ_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_s_read_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_s_read_bo = reloc->robj;
		track->db_dirty = true;
		break;
	case DB_STENCIL_WRITE_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_s_write_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_s_write_bo = reloc->robj;
		track->db_dirty = true;
		break;
	case VGT_STRMOUT_CONFIG:
		track->vgt_strmout_config = radeon_get_ib_value(p, idx);
		track->streamout_dirty = true;
		break;
	case VGT_STRMOUT_BUFFER_CONFIG:
		track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
		track->streamout_dirty = true;
		break;
	case VGT_STRMOUT_BUFFER_BASE_0:
	case VGT_STRMOUT_BUFFER_BASE_1:
	case VGT_STRMOUT_BUFFER_BASE_2:
	case VGT_STRMOUT_BUFFER_BASE_3:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
		track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->vgt_strmout_bo[tmp] = reloc->robj;
		track->streamout_dirty = true;
		break;
	case VGT_STRMOUT_BUFFER_SIZE_0:
	case VGT_STRMOUT_BUFFER_SIZE_1:
	case VGT_STRMOUT_BUFFER_SIZE_2:
	case VGT_STRMOUT_BUFFER_SIZE_3:
		tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
		/* size in register is DWs, convert to bytes */
		track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
		track->streamout_dirty = true;
		break;
	case CP_COHER_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
	case CB_TARGET_MASK:
		track->cb_target_mask = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case CB_SHADER_MASK:
		track->cb_shader_mask = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case PA_SC_AA_CONFIG:
		if (p->rdev->family >= CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
		track->nsamples = 1 << tmp;
		break;
	case CAYMAN_PA_SC_AA_CONFIG:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK;
		track->nsamples = 1 << tmp;
		break;
	case CB_COLOR0_VIEW:
	case CB_COLOR1_VIEW:
	case CB_COLOR2_VIEW:
	case CB_COLOR3_VIEW:
	case CB_COLOR4_VIEW:
	case CB_COLOR5_VIEW:
	case CB_COLOR6_VIEW:
	case CB_COLOR7_VIEW:
		tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case CB_COLOR8_VIEW:
	case CB_COLOR9_VIEW:
	case CB_COLOR10_VIEW:
	case CB_COLOR11_VIEW:
		tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case CB_COLOR0_INFO:
	case CB_COLOR1_INFO:
	case CB_COLOR2_INFO:
	case CB_COLOR3_INFO:
	case CB_COLOR4_INFO:
	case CB_COLOR5_INFO:
	case CB_COLOR6_INFO:
	case CB_COLOR7_INFO:
		tmp = (reg - CB_COLOR0_INFO) / 0x3c;
		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				dev_warn(p->dev, "bad SET_CONTEXT_REG "
						"0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
		}
		track->cb_dirty = true;
		break;
	case CB_COLOR8_INFO:
	case CB_COLOR9_INFO:
	case CB_COLOR10_INFO:
	case CB_COLOR11_INFO:
		tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				dev_warn(p->dev, "bad SET_CONTEXT_REG "
						"0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
		}
		track->cb_dirty = true;
		break;
	case CB_COLOR0_PITCH:
	case CB_COLOR1_PITCH:
	case CB_COLOR2_PITCH:
	case CB_COLOR3_PITCH:
	case CB_COLOR4_PITCH:
	case CB_COLOR5_PITCH:
	case CB_COLOR6_PITCH:
	case CB_COLOR7_PITCH:
		tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case CB_COLOR8_PITCH:
	case CB_COLOR9_PITCH:
	case CB_COLOR10_PITCH:
	case CB_COLOR11_PITCH:
		tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case CB_COLOR0_SLICE:
	case CB_COLOR1_SLICE:
	case CB_COLOR2_SLICE:
	case CB_COLOR3_SLICE:
	case CB_COLOR4_SLICE:
	case CB_COLOR5_SLICE:
	case CB_COLOR6_SLICE:
	case CB_COLOR7_SLICE:
		tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_slice_idx[tmp] = idx;
		track->cb_dirty = true;
		break;
	case CB_COLOR8_SLICE:
	case CB_COLOR9_SLICE:
	case CB_COLOR10_SLICE:
	case CB_COLOR11_SLICE:
		tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_slice_idx[tmp] = idx;
		track->cb_dirty = true;
		break;
	case CB_COLOR0_ATTRIB:
	case CB_COLOR1_ATTRIB:
	case CB_COLOR2_ATTRIB:
	case CB_COLOR3_ATTRIB:
	case CB_COLOR4_ATTRIB:
	case CB_COLOR5_ATTRIB:
	case CB_COLOR6_ATTRIB:
	case CB_COLOR7_ATTRIB:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				unsigned bankw, bankh, mtaspect, tile_split;

				evergreen_tiling_fields(reloc->lobj.tiling_flags,
							&bankw, &bankh, &mtaspect,
							&tile_split);
				ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
				ib[idx] |= CB_TILE_SPLIT(tile_split) |
					   CB_BANK_WIDTH(bankw) |
					   CB_BANK_HEIGHT(bankh) |
					   CB_MACRO_TILE_ASPECT(mtaspect);
			}
		}
		tmp = ((reg - CB_COLOR0_ATTRIB) / 0x3c);
		track->cb_color_attrib[tmp] = ib[idx];
		track->cb_dirty = true;
		break;
	case CB_COLOR8_ATTRIB:
	case CB_COLOR9_ATTRIB:
	case CB_COLOR10_ATTRIB:
	case CB_COLOR11_ATTRIB:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				unsigned bankw, bankh, mtaspect, tile_split;

				evergreen_tiling_fields(reloc->lobj.tiling_flags,
							&bankw, &bankh, &mtaspect,
							&tile_split);
				ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
				ib[idx] |= CB_TILE_SPLIT(tile_split) |
					   CB_BANK_WIDTH(bankw) |
					   CB_BANK_HEIGHT(bankh) |
					   CB_MACRO_TILE_ASPECT(mtaspect);
			}
		}
		tmp = ((reg - CB_COLOR8_ATTRIB) / 0x1c) + 8;
		track->cb_color_attrib[tmp] = ib[idx];
		track->cb_dirty = true;
		break;
	case CB_COLOR0_FMASK:
	case CB_COLOR1_FMASK:
	case CB_COLOR2_FMASK:
	case CB_COLOR3_FMASK:
	case CB_COLOR4_FMASK:
	case CB_COLOR5_FMASK:
	case CB_COLOR6_FMASK:
	case CB_COLOR7_FMASK:
		tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_fmask_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR0_CMASK:
	case CB_COLOR1_CMASK:
	case CB_COLOR2_CMASK:
	case CB_COLOR3_CMASK:
	case CB_COLOR4_CMASK:
	case CB_COLOR5_CMASK:
	case CB_COLOR6_CMASK:
	case CB_COLOR7_CMASK:
		tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_cmask_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR0_FMASK_SLICE:
	case CB_COLOR1_FMASK_SLICE:
	case CB_COLOR2_FMASK_SLICE:
	case CB_COLOR3_FMASK_SLICE:
	case CB_COLOR4_FMASK_SLICE:
	case CB_COLOR5_FMASK_SLICE:
	case CB_COLOR6_FMASK_SLICE:
	case CB_COLOR7_FMASK_SLICE:
		tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c;
		track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_CMASK_SLICE:
	case CB_COLOR1_CMASK_SLICE:
	case CB_COLOR2_CMASK_SLICE:
	case CB_COLOR3_CMASK_SLICE:
	case CB_COLOR4_CMASK_SLICE:
	case CB_COLOR5_CMASK_SLICE:
	case CB_COLOR6_CMASK_SLICE:
	case CB_COLOR7_CMASK_SLICE:
		tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c;
		track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_BASE:
	case CB_COLOR1_BASE:
	case CB_COLOR2_BASE:
	case CB_COLOR3_BASE:
	case CB_COLOR4_BASE:
	case CB_COLOR5_BASE:
	case CB_COLOR6_BASE:
	case CB_COLOR7_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - CB_COLOR0_BASE) / 0x3c;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_bo[tmp] = reloc->robj;
		track->cb_dirty = true;
		break;
	case CB_COLOR8_BASE:
	case CB_COLOR9_BASE:
	case CB_COLOR10_BASE:
	case CB_COLOR11_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_bo[tmp] = reloc->robj;
		track->cb_dirty = true;
		break;
	case DB_HTILE_DATA_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->htile_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->htile_bo = reloc->robj;
		track->db_dirty = true;
		break;
	case DB_HTILE_SURFACE:
		track->htile_surface = radeon_get_ib_value(p, idx);
		/* force 8x8 htile width and height */
		ib[idx] |= 3;
		track->db_dirty = true;
		break;
	case CB_IMMED0_BASE:
	case CB_IMMED1_BASE:
	case CB_IMMED2_BASE:
	case CB_IMMED3_BASE:
	case CB_IMMED4_BASE:
	case CB_IMMED5_BASE:
	case CB_IMMED6_BASE:
	case CB_IMMED7_BASE:
	case CB_IMMED8_BASE:
	case CB_IMMED9_BASE:
	case CB_IMMED10_BASE:
	case CB_IMMED11_BASE:
	case SQ_PGM_START_FS:
	case SQ_PGM_START_ES:
	case SQ_PGM_START_VS:
	case SQ_PGM_START_GS:
	case SQ_PGM_START_PS:
	case SQ_PGM_START_HS:
	case SQ_PGM_START_LS:
	case SQ_CONST_MEM_BASE:
	case SQ_ALU_CONST_CACHE_GS_0:
	case SQ_ALU_CONST_CACHE_GS_1:
	case SQ_ALU_CONST_CACHE_GS_2:
	case SQ_ALU_CONST_CACHE_GS_3:
	case SQ_ALU_CONST_CACHE_GS_4:
	case SQ_ALU_CONST_CACHE_GS_5:
	case SQ_ALU_CONST_CACHE_GS_6:
	case SQ_ALU_CONST_CACHE_GS_7:
	case SQ_ALU_CONST_CACHE_GS_8:
	case SQ_ALU_CONST_CACHE_GS_9:
	case SQ_ALU_CONST_CACHE_GS_10:
	case SQ_ALU_CONST_CACHE_GS_11:
	case SQ_ALU_CONST_CACHE_GS_12:
	case SQ_ALU_CONST_CACHE_GS_13:
	case SQ_ALU_CONST_CACHE_GS_14:
	case SQ_ALU_CONST_CACHE_GS_15:
	case SQ_ALU_CONST_CACHE_PS_0:
	case SQ_ALU_CONST_CACHE_PS_1:
	case SQ_ALU_CONST_CACHE_PS_2:
	case SQ_ALU_CONST_CACHE_PS_3:
	case SQ_ALU_CONST_CACHE_PS_4:
	case SQ_ALU_CONST_CACHE_PS_5:
	case SQ_ALU_CONST_CACHE_PS_6:
	case SQ_ALU_CONST_CACHE_PS_7:
	case SQ_ALU_CONST_CACHE_PS_8:
	case SQ_ALU_CONST_CACHE_PS_9:
	case SQ_ALU_CONST_CACHE_PS_10:
	case SQ_ALU_CONST_CACHE_PS_11:
	case SQ_ALU_CONST_CACHE_PS_12:
	case SQ_ALU_CONST_CACHE_PS_13:
	case SQ_ALU_CONST_CACHE_PS_14:
	case SQ_ALU_CONST_CACHE_PS_15:
	case SQ_ALU_CONST_CACHE_VS_0:
	case SQ_ALU_CONST_CACHE_VS_1:
	case SQ_ALU_CONST_CACHE_VS_2:
	case SQ_ALU_CONST_CACHE_VS_3:
	case SQ_ALU_CONST_CACHE_VS_4:
	case SQ_ALU_CONST_CACHE_VS_5:
	case SQ_ALU_CONST_CACHE_VS_6:
	case SQ_ALU_CONST_CACHE_VS_7:
	case SQ_ALU_CONST_CACHE_VS_8:
	case SQ_ALU_CONST_CACHE_VS_9:
	case SQ_ALU_CONST_CACHE_VS_10:
	case SQ_ALU_CONST_CACHE_VS_11:
	case SQ_ALU_CONST_CACHE_VS_12:
	case SQ_ALU_CONST_CACHE_VS_13:
	case SQ_ALU_CONST_CACHE_VS_14:
	case SQ_ALU_CONST_CACHE_VS_15:
	case SQ_ALU_CONST_CACHE_HS_0:
	case SQ_ALU_CONST_CACHE_HS_1:
	case SQ_ALU_CONST_CACHE_HS_2:
	case SQ_ALU_CONST_CACHE_HS_3:
	case SQ_ALU_CONST_CACHE_HS_4:
	case SQ_ALU_CONST_CACHE_HS_5:
	case SQ_ALU_CONST_CACHE_HS_6:
	case SQ_ALU_CONST_CACHE_HS_7:
	case SQ_ALU_CONST_CACHE_HS_8:
	case SQ_ALU_CONST_CACHE_HS_9:
	case SQ_ALU_CONST_CACHE_HS_10:
	case SQ_ALU_CONST_CACHE_HS_11:
	case SQ_ALU_CONST_CACHE_HS_12:
	case SQ_ALU_CONST_CACHE_HS_13:
	case SQ_ALU_CONST_CACHE_HS_14:
	case SQ_ALU_CONST_CACHE_HS_15:
	case SQ_ALU_CONST_CACHE_LS_0:
	case SQ_ALU_CONST_CACHE_LS_1:
	case SQ_ALU_CONST_CACHE_LS_2:
	case SQ_ALU_CONST_CACHE_LS_3:
	case SQ_ALU_CONST_CACHE_LS_4:
	case SQ_ALU_CONST_CACHE_LS_5:
	case SQ_ALU_CONST_CACHE_LS_6:
	case SQ_ALU_CONST_CACHE_LS_7:
	case SQ_ALU_CONST_CACHE_LS_8:
	case SQ_ALU_CONST_CACHE_LS_9:
	case SQ_ALU_CONST_CACHE_LS_10:
	case SQ_ALU_CONST_CACHE_LS_11:
	case SQ_ALU_CONST_CACHE_LS_12:
	case SQ_ALU_CONST_CACHE_LS_13:
	case SQ_ALU_CONST_CACHE_LS_14:
	case SQ_ALU_CONST_CACHE_LS_15:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case SX_MEMORY_EXPORT_BASE:
		if (p->rdev->family >= CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONFIG_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONFIG_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case CAYMAN_SX_SCATTER_EXPORT_BASE:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case SX_MISC:
		track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
		break;
	default:
1757 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	u32 last_reg, m, i;

	if (p->rdev->family >= CHIP_CAYMAN)
		last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
	else
		last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);

	i = (reg >> 7);
	if (i >= last_reg) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return false;
	}
	m = 1 << ((reg >> 2) & 31);
	if (p->rdev->family >= CHIP_CAYMAN) {
		if (!(cayman_reg_safe_bm[i] & m))
			return true;
	} else {
		if (!(evergreen_reg_safe_bm[i] & m))
			return true;
	}
	dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
	return false;
}
static int evergreen_packet3_check(struct radeon_cs_parser *p,
				   struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct evergreen_cs_track *track;
	volatile u32 *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;
	u32 idx_value;

	track = (struct evergreen_cs_track *)p->track;
	ib = p->ib.ptr;
	idx = pkt->idx + 1;
	idx_value = radeon_get_ib_value(p, idx);
	switch (pkt->opcode) {
	case PACKET3_SET_PREDICATION:
	{
		int pred_op;
		int tmp;
		uint64_t offset;

		if (pkt->count != 1) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}

		tmp = radeon_get_ib_value(p, idx + 1);
		pred_op = (tmp >> 16) & 0x7;

		/* for the clear predicate operation */
		if (pred_op == 0)
			return 0;

		if (pred_op > 2) {
			DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
			return -EINVAL;
		}
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
			 (idx_value & 0xfffffff0) +
			 ((u64)(tmp & 0xff) << 32);

		ib[idx + 0] = offset;
		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
	}
	break;
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
	case PACKET3_CLEAR_STATE:
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
			return -EINVAL;
		}
		break;
	case CAYMAN_PACKET3_DEALLOC_STATE:
		if (p->rdev->family < CHIP_CAYMAN) {
			DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_BASE:
	{
		uint64_t offset;

		if (pkt->count != 1) {
			DRM_ERROR("bad INDEX_BASE\n");
			return -EINVAL;
		}
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("bad INDEX_BASE\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
			 idx_value +
			 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

		ib[idx+0] = offset;
		ib[idx+1] = upper_32_bits(offset) & 0xff;

		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	}
	case PACKET3_DRAW_INDEX:
	{
		uint64_t offset;

		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
			 idx_value +
			 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

		ib[idx+0] = offset;
		ib[idx+1] = upper_32_bits(offset) & 0xff;

		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	}
	case PACKET3_DRAW_INDEX_2:
	{
		uint64_t offset;

		if (pkt->count != 4) {
			DRM_ERROR("bad DRAW_INDEX_2\n");
			return -EINVAL;
		}
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX_2\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
			 radeon_get_ib_value(p, idx+1) +
			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

		ib[idx+1] = offset;
		ib[idx+2] = upper_32_bits(offset) & 0xff;

		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	}
	case PACKET3_DRAW_INDEX_AUTO:
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_MULTI_AUTO:
		if (pkt->count != 2) {
			DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_IMMD:
		if (pkt->count < 2) {
			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_OFFSET:
		if (pkt->count != 2) {
			DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_OFFSET_2:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DISPATCH_DIRECT:
		if (pkt->count != 3) {
			DRM_ERROR("bad DISPATCH_DIRECT\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DISPATCH_INDIRECT:
		if (pkt->count != 1) {
			DRM_ERROR("bad DISPATCH_INDIRECT\n");
			return -EINVAL;
		}
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("bad DISPATCH_INDIRECT\n");
			return -EINVAL;
		}
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			uint64_t offset;

			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				DRM_ERROR("bad WAIT_REG_MEM\n");
				return -EINVAL;
			}

			offset = reloc->lobj.gpu_offset +
				 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		} else if (idx_value & 0x100) {
			DRM_ERROR("cannot use PFP on REG wait\n");
			return -EINVAL;
		}
		break;
	case PACKET3_CP_DMA:
	{
		u32 command, size, info;
		u64 tmp, offset;
		if (pkt->count != 4) {
			DRM_ERROR("bad CP DMA\n");
			return -EINVAL;
		}
		command = radeon_get_ib_value(p, idx+4);
		size = command & 0x1fffff;
		info = radeon_get_ib_value(p, idx+1);
		if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
		    (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
		    ((((info & 0x00300000) >> 20) == 0) &&
		     (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
		    ((((info & 0x60000000) >> 29) == 0) &&
		     (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
			/* non mem-to-mem copies require a dw-aligned count */
			if (size % 4) {
				DRM_ERROR("CP DMA command requires dw count alignment\n");
				return -EINVAL;
			}
		}
2083 if (command & PACKET3_CP_DMA_CMD_SAS) {
2084 /* src address space is register */
2086 if (((info & 0x60000000) >> 29) != 1) {
2087 DRM_ERROR("CP DMA SAS not supported\n");
2091 if (command & PACKET3_CP_DMA_CMD_SAIC) {
2092 DRM_ERROR("CP DMA SAIC only supported for registers\n");
2095 /* src address space is memory */
2096 if (((info & 0x60000000) >> 29) == 0) {
2097 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2099 DRM_ERROR("bad CP DMA SRC\n");
2103 tmp = radeon_get_ib_value(p, idx) +
2104 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
2106 offset = reloc->lobj.gpu_offset + tmp;
2108 if ((tmp + size) > radeon_bo_size(reloc->robj)) {
2109 dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
2110 tmp + size, radeon_bo_size(reloc->robj));
2115 ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
2116 } else if (((info & 0x60000000) >> 29) != 2) {
2117 DRM_ERROR("bad CP DMA SRC_SEL\n");
2121 if (command & PACKET3_CP_DMA_CMD_DAS) {
2122 /* dst address space is register */
2124 if (((info & 0x00300000) >> 20) != 1) {
2125 DRM_ERROR("CP DMA DAS not supported\n");
2129 /* dst address space is memory */
2130 if (command & PACKET3_CP_DMA_CMD_DAIC) {
2131 DRM_ERROR("CP DMA DAIC only supported for registers\n");
2134 if (((info & 0x00300000) >> 20) == 0) {
2135 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2137 DRM_ERROR("bad CP DMA DST\n");
2141 tmp = radeon_get_ib_value(p, idx+2) +
2142 ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
2144 offset = reloc->lobj.gpu_offset + tmp;
2146 if ((tmp + size) > radeon_bo_size(reloc->robj)) {
2147 dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
2148 tmp + size, radeon_bo_size(reloc->robj));
2153 ib[idx+3] = upper_32_bits(offset) & 0xff;
2155 DRM_ERROR("bad CP DMA DST_SEL\n");
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				DRM_ERROR("bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			uint64_t offset;

			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				DRM_ERROR("bad EVENT_WRITE\n");
				return -EINVAL;
			}
			offset = reloc->lobj.gpu_offset +
				 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
			ib[idx+1] = offset & 0xfffffff8;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		break;
	case PACKET3_EVENT_WRITE_EOP:
	{
		uint64_t offset;

		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

		ib[idx+1] = offset & 0xfffffffc;
		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
		break;
	}
	case PACKET3_EVENT_WRITE_EOS:
	{
		uint64_t offset;

		if (pkt->count != 3) {
			DRM_ERROR("bad EVENT_WRITE_EOS\n");
			return -EINVAL;
		}
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOS\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

		ib[idx+1] = offset & 0xfffffffc;
		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
		break;
	}
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = evergreen_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = evergreen_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 8) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_START) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		for (i = 0; i < (pkt->count / 8); i++) {
			struct radeon_bo *texture, *mipmap;
			u32 toffset, moffset;
			u32 size, offset, mip_address, tex_dim;

			switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				/* tex base */
				r = radeon_cs_packet_next_reloc(p, &reloc, 0);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (tex)\n");
					return -EINVAL;
				}
				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
					ib[idx+1+(i*8)+1] |=
						TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
					if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
						unsigned bankw, bankh, mtaspect, tile_split;

						evergreen_tiling_fields(reloc->lobj.tiling_flags,
									&bankw, &bankh, &mtaspect,
									&tile_split);
						ib[idx+1+(i*8)+6] |= TEX_TILE_SPLIT(tile_split);
						ib[idx+1+(i*8)+7] |=
							TEX_BANK_WIDTH(bankw) |
							TEX_BANK_HEIGHT(bankh) |
							MACRO_TILE_ASPECT(mtaspect) |
							TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
					}
				}
				texture = reloc->robj;
				toffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);

				/* tex mip base */
				tex_dim = ib[idx+1+(i*8)+0] & 0x7;
				mip_address = ib[idx+1+(i*8)+3];

				if ((tex_dim == SQ_TEX_DIM_2D_MSAA || tex_dim == SQ_TEX_DIM_2D_ARRAY_MSAA) &&
				    !mip_address &&
				    !radeon_cs_packet_next_is_pkt3_nop(p)) {
					/* MIP_ADDRESS should point to FMASK for an MSAA texture.
					 * It should be 0 if FMASK is disabled. */
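					/* FMASK disabled: nothing to relocate for the mip slot */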
					moffset = 0;
					mipmap = NULL;
				} else {
					r = radeon_cs_packet_next_reloc(p, &reloc, 0);
					if (r) {
						DRM_ERROR("bad SET_RESOURCE (tex)\n");
						return -EINVAL;
					}
					moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
					mipmap = reloc->robj;
				}

				r = evergreen_cs_track_validate_texture(p, texture, mipmap, idx+1+(i*8));
				if (r)
					return r;
				ib[idx+1+(i*8)+2] += toffset;
				ib[idx+1+(i*8)+3] += moffset;
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
			{
				uint64_t offset64;
				/* vtx base */
				r = radeon_cs_packet_next_reloc(p, &reloc, 0);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (vtx)\n");
					return -EINVAL;
				}
				offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
				size = radeon_get_ib_value(p, idx+1+(i*8)+1);
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn(p->dev, "vbo resource seems too big for the bo\n");
					ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset;
				}

				offset64 = reloc->lobj.gpu_offset + offset;
				ib[idx+1+(i*8)+0] = offset64;
				ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
						    (upper_32_bits(offset64) & 0xff);
				break;
			}
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_ALU_CONST:
		/* XXX fix me ALU const buffers only */
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_START) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
	case PACKET3_STRMOUT_BUFFER_UPDATE:
		if (pkt->count != 4) {
			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
			return -EINVAL;
		}
		/* Updating memory at DST_ADDRESS. */
		if (idx_value & 0x1) {
			u64 offset;
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		/* Reading data from SRC_ADDRESS. */
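		/* bits [2:1] of dword 1 select the source; 2 means memory,
		 * which needs a reloc */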
		if (((idx_value >> 1) & 0x3) == 2) {
			u64 offset;
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		}
		break;
	case PACKET3_MEM_WRITE:
	{
		u64 offset;

		if (pkt->count != 3) {
			DRM_ERROR("bad MEM_WRITE (invalid count)\n");
			return -EINVAL;
		}
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
			return -EINVAL;
		}
		offset = radeon_get_ib_value(p, idx+0);
		offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
		if (offset & 0x7) {
			DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
			return -EINVAL;
		}
		if ((offset + 8) > radeon_bo_size(reloc->robj)) {
			DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
				  offset + 8, radeon_bo_size(reloc->robj));
			return -EINVAL;
		}
		offset += reloc->lobj.gpu_offset;
		ib[idx+0] = offset;
		ib[idx+1] = upper_32_bits(offset) & 0xff;
		break;
	}
	case PACKET3_COPY_DW:
		if (pkt->count != 4) {
			DRM_ERROR("bad COPY_DW (invalid count)\n");
			return -EINVAL;
		}
		if (idx_value & 0x1) {
			u64 offset;
			/* SRC is memory. */
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		} else {
			/* SRC is a reg. */
			reg = radeon_get_ib_value(p, idx+1) << 2;
			if (!evergreen_is_safe_reg(p, reg, idx+1))
				return -EINVAL;
		}
		if (idx_value & 0x2) {
			u64 offset;
			/* DST is memory. */
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		} else {
			/* DST is a reg. */
			reg = radeon_get_ib_value(p, idx+3) << 2;
			if (!evergreen_is_safe_reg(p, reg, idx+3))
				return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
int evergreen_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct evergreen_cs_track *track;
	u32 tmp;
	int r;

	if (p->track == NULL) {
		/* initialize tracker, we are in kms */
		track = kzalloc(sizeof(*track), GFP_KERNEL);
		if (track == NULL)
			return -ENOMEM;
		evergreen_cs_track_init(track);
		if (p->rdev->family >= CHIP_CAYMAN)
			tmp = p->rdev->config.cayman.tile_config;
		else
			tmp = p->rdev->config.evergreen.tile_config;

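		/* tile_config nibbles: [3:0] pipes, [7:4] banks,
		 * [11:8] group size, [15:12] row size */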
		switch (tmp & 0xf) {
		case 0:
			track->npipes = 1;
			break;
		case 1:
		default:
			track->npipes = 2;
			break;
		case 2:
			track->npipes = 4;
			break;
		case 3:
			track->npipes = 8;
			break;
		}

		switch ((tmp & 0xf0) >> 4) {
		case 0:
			track->nbanks = 4;
			break;
		case 1:
		default:
			track->nbanks = 8;
			break;
		case 2:
			track->nbanks = 16;
			break;
		}

		switch ((tmp & 0xf00) >> 8) {
		case 0:
			track->group_size = 256;
			break;
		case 1:
		default:
			track->group_size = 512;
			break;
		}

		switch ((tmp & 0xf000) >> 12) {
		case 0:
			track->row_size = 1;
			break;
		case 1:
		default:
			track->row_size = 2;
			break;
		case 2:
			track->row_size = 4;
			break;
		}

		p->track = track;
	}
	do {
		r = radeon_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case RADEON_PACKET_TYPE0:
			r = evergreen_cs_parse_packet0(p, &pkt);
			break;
		case RADEON_PACKET_TYPE2:
			break;
		case RADEON_PACKET_TYPE3:
			r = evergreen_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			kfree(p->track);
			p->track = NULL;
			return -EINVAL;
		}
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib.length_dw; r++) {
		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
		mdelay(1);
	}
#endif
	kfree(p->track);
	p->track = NULL;
	return 0;
}
/**
 * evergreen_dma_cs_parse() - parse the DMA IB
 * @p:		parser structure holding parsing context.
 *
 * Parses the DMA IB from the CS ioctl and updates
 * the GPU addresses based on the reloc information and
 * checks for errors. (Evergreen-Cayman)
 * Returns 0 for success and an error on failure.
 **/
int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc;
	u32 header, cmd, count, sub_cmd;
	volatile u32 *ib = p->ib.ptr;
	u32 idx;
	u64 src_offset, dst_offset, dst2_offset;
	int r;

	do {
		if (p->idx >= ib_chunk->length_dw) {
			DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
				  p->idx, ib_chunk->length_dw);
			return -EINVAL;
		}
		idx = p->idx;
		header = radeon_get_ib_value(p, idx);
		cmd = GET_DMA_CMD(header);
		count = GET_DMA_COUNT(header);
		sub_cmd = GET_DMA_SUB_CMD(header);
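		/* command, dword count and sub-command are all packed
		 * into the single header dword */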
		switch (cmd) {
		case DMA_PACKET_WRITE:
			r = r600_dma_cs_next_reloc(p, &dst_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_WRITE\n");
				return -EINVAL;
			}
			switch (sub_cmd) {
			/* tiled */
			case 8:
				dst_offset = radeon_get_ib_value(p, idx+1);
				dst_offset <<= 8;

				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
				p->idx += count + 7;
				break;
			/* linear */
			case 0:
				dst_offset = radeon_get_ib_value(p, idx+1);
				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;

				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
				ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
				p->idx += count + 3;
				break;
			default:
				DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, header);
				return -EINVAL;
			}
			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
				dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
				return -EINVAL;
			}
			break;
		case DMA_PACKET_COPY:
			r = r600_dma_cs_next_reloc(p, &src_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_COPY\n");
				return -EINVAL;
			}
			r = r600_dma_cs_next_reloc(p, &dst_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_COPY\n");
				return -EINVAL;
			}
			switch (sub_cmd) {
			/* Copy L2L, DW aligned */
			case 0x00:
				/* L2L, dw */
				src_offset = radeon_get_ib_value(p, idx+2);
				src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
				dst_offset = radeon_get_ib_value(p, idx+1);
				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
					dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
						 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
					return -EINVAL;
				}
				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
					dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
						 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
					return -EINVAL;
				}
				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
				ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
				ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
				ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
				p->idx += 5;
				break;
			/* Copy L2T/T2L */
			case 0x08:
				/* detile bit */
				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
					/* tiled src, linear dst */
					src_offset = radeon_get_ib_value(p, idx+1);
					src_offset <<= 8;
					ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);

					dst_offset = radeon_get_ib_value(p, idx + 7);
					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
					ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
				} else {
					/* linear src, tiled dst */
					src_offset = radeon_get_ib_value(p, idx+7);
					src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
					ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;

					dst_offset = radeon_get_ib_value(p, idx+1);
					dst_offset <<= 8;
					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
				}
				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
					dev_warn(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n",
						 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
					return -EINVAL;
				}
				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
					dev_warn(p->dev, "DMA L2T, dst buffer too small (%llu %lu)\n",
						 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
					return -EINVAL;
				}
				p->idx += 9;
				break;
			/* Copy L2L, byte aligned */
			case 0x40:
				/* L2L, byte */
				src_offset = radeon_get_ib_value(p, idx+2);
				src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
				dst_offset = radeon_get_ib_value(p, idx+1);
				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
				if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
					dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
						 src_offset + count, radeon_bo_size(src_reloc->robj));
					return -EINVAL;
				}
				if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
					dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
						 dst_offset + count, radeon_bo_size(dst_reloc->robj));
					return -EINVAL;
				}
				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
				ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
				ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
				ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
				p->idx += 5;
				break;
			/* Copy L2L, partial */
			case 0x41:
				/* L2L, partial */
				if (p->family < CHIP_CAYMAN) {
					DRM_ERROR("L2L Partial is cayman only !\n");
					return -EINVAL;
				}
				ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
				ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
				ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
				ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;

				p->idx += 9;
				break;
			/* Copy L2L, DW aligned, broadcast */
			case 0x44:
				/* L2L, dw, broadcast */
				r = r600_dma_cs_next_reloc(p, &dst2_reloc);
				if (r) {
					DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
					return -EINVAL;
				}
				dst_offset = radeon_get_ib_value(p, idx+1);
				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
				dst2_offset = radeon_get_ib_value(p, idx+2);
				dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
				src_offset = radeon_get_ib_value(p, idx+3);
				src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
					dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
						 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
					return -EINVAL;
				}
				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
					dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
						 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
					return -EINVAL;
				}
				if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
					dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
						 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
					return -EINVAL;
				}
				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
				ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
				ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
				ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
				ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
				ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
				p->idx += 7;
				break;
			/* Copy L2T Frame to Field */
			case 0x48:
				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
					DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
					return -EINVAL;
				}
				r = r600_dma_cs_next_reloc(p, &dst2_reloc);
				if (r) {
					DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
					return -EINVAL;
				}
				dst_offset = radeon_get_ib_value(p, idx+1);
				dst_offset <<= 8;
				dst2_offset = radeon_get_ib_value(p, idx+2);
				dst2_offset <<= 8;
				src_offset = radeon_get_ib_value(p, idx+8);
				src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
					dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
						 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
					return -EINVAL;
				}
				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
					dev_warn(p->dev, "DMA L2T, frame to fields dst buffer too small (%llu %lu)\n",
						 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
					return -EINVAL;
				}
				if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
					dev_warn(p->dev, "DMA L2T, frame to fields dst2 buffer too small (%llu %lu)\n",
						 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
					return -EINVAL;
				}
				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
				ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
				ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
				ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
				p->idx += 10;
				break;
			/* Copy L2T/T2L, partial */
			case 0x49:
				/* L2T, T2L partial */
				if (p->family < CHIP_CAYMAN) {
					DRM_ERROR("L2T, T2L Partial is cayman only !\n");
					return -EINVAL;
				}
				/* detile bit */
				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
					/* tiled src, linear dst */
					ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);

					ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
				} else {
					/* linear src, tiled dst */
					ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;

					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
				}
				p->idx += 12;
				break;
			/* Copy L2T broadcast */
			case 0x4b:
				/* L2T, broadcast */
				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
					DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
					return -EINVAL;
				}
				r = r600_dma_cs_next_reloc(p, &dst2_reloc);
				if (r) {
					DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
					return -EINVAL;
				}
				dst_offset = radeon_get_ib_value(p, idx+1);
				dst_offset <<= 8;
				dst2_offset = radeon_get_ib_value(p, idx+2);
				dst2_offset <<= 8;
				src_offset = radeon_get_ib_value(p, idx+8);
				src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
					dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
						 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
					return -EINVAL;
				}
				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
					dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
						 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
					return -EINVAL;
				}
				if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
					dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
						 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
					return -EINVAL;
				}
				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
				ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
				ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
				ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
				p->idx += 10;
				break;
			/* Copy L2T/T2L (tile units) */
			case 0x4c:
				/* L2T, T2L */
				/* detile bit */
				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
					/* tiled src, linear dst */
					src_offset = radeon_get_ib_value(p, idx+1);
					src_offset <<= 8;
					ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);

					dst_offset = radeon_get_ib_value(p, idx+7);
					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
					ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
				} else {
					/* linear src, tiled dst */
					src_offset = radeon_get_ib_value(p, idx+7);
					src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
					ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;

					dst_offset = radeon_get_ib_value(p, idx+1);
					dst_offset <<= 8;
					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
				}
				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
					dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
						 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
					return -EINVAL;
				}
				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
					dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
						 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
					return -EINVAL;
				}
				p->idx += 9;
				break;
			/* Copy T2T, partial (tile units) */
			case 0x4d:
				/* T2T partial */
				if (p->family < CHIP_CAYMAN) {
					DRM_ERROR("T2T Partial is cayman only !\n");
					return -EINVAL;
				}
				ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
				ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
				p->idx += 13;
				break;
			/* Copy L2T broadcast (tile units) */
			case 0x4f:
				/* L2T, broadcast */
				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
					DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
					return -EINVAL;
				}
				r = r600_dma_cs_next_reloc(p, &dst2_reloc);
				if (r) {
					DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
					return -EINVAL;
				}
				dst_offset = radeon_get_ib_value(p, idx+1);
				dst_offset <<= 8;
				dst2_offset = radeon_get_ib_value(p, idx+2);
				dst2_offset <<= 8;
				src_offset = radeon_get_ib_value(p, idx+8);
				src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
					dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
						 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
					return -EINVAL;
				}
				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
					dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
						 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
					return -EINVAL;
				}
				if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
					dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
						 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
					return -EINVAL;
				}
				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
				ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
				ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
				ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
				p->idx += 10;
				break;
			default:
				DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, header);
				return -EINVAL;
			}
			break;
		case DMA_PACKET_CONSTANT_FILL:
			r = r600_dma_cs_next_reloc(p, &dst_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
				return -EINVAL;
			}
			dst_offset = radeon_get_ib_value(p, idx+1);
			dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
				dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
				return -EINVAL;
			}
			ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
			ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
			p->idx += 4;
			break;
		case DMA_PACKET_NOP:
			p->idx += 1;
			break;
		default:
			DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
			return -EINVAL;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib.length_dw; r++) {
		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
		mdelay(1);
	}
#endif
	return 0;
}
static bool evergreen_vm_reg_valid(u32 reg)
{
	/* context regs are fine */
	if (reg >= 0x28000)
		return true;

	/* check config regs */
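	/* anything not on the whitelist below is rejected */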
	switch (reg) {
	case WAIT_UNTIL:
	case GRBM_GFX_INDEX:
	case CP_STRMOUT_CNTL:
	case CP_COHER_CNTL:
	case CP_COHER_SIZE:
	case VGT_VTX_VECT_EJECT_REG:
	case VGT_CACHE_INVALIDATION:
	case VGT_GS_VERTEX_REUSE:
	case VGT_PRIMITIVE_TYPE:
	case VGT_INDEX_TYPE:
	case VGT_NUM_INDICES:
	case VGT_NUM_INSTANCES:
	case VGT_COMPUTE_DIM_X:
	case VGT_COMPUTE_DIM_Y:
	case VGT_COMPUTE_DIM_Z:
	case VGT_COMPUTE_START_X:
	case VGT_COMPUTE_START_Y:
	case VGT_COMPUTE_START_Z:
	case VGT_COMPUTE_INDEX:
	case VGT_COMPUTE_THREAD_GROUP_SIZE:
	case VGT_HS_OFFCHIP_PARAM:
	case PA_CL_ENHANCE:
	case PA_SU_LINE_STIPPLE_VALUE:
	case PA_SC_LINE_STIPPLE_STATE:
	case PA_SC_ENHANCE:
	case SQ_DYN_GPR_CNTL_PS_FLUSH_REQ:
	case SQ_DYN_GPR_SIMD_LOCK_EN:
	case SQ_CONFIG:
	case SQ_GPR_RESOURCE_MGMT_1:
	case SQ_GLOBAL_GPR_RESOURCE_MGMT_1:
	case SQ_GLOBAL_GPR_RESOURCE_MGMT_2:
	case SQ_CONST_MEM_BASE:
	case SQ_STATIC_THREAD_MGMT_1:
	case SQ_STATIC_THREAD_MGMT_2:
	case SQ_STATIC_THREAD_MGMT_3:
	case SPI_CONFIG_CNTL:
	case SPI_CONFIG_CNTL_1:
	case TA_CNTL_AUX:
	case DB_DEBUG:
	case DB_DEBUG2:
	case DB_DEBUG3:
	case DB_DEBUG4:
	case DB_WATERMARKS:
	case TD_PS_BORDER_COLOR_INDEX:
	case TD_PS_BORDER_COLOR_RED:
	case TD_PS_BORDER_COLOR_GREEN:
	case TD_PS_BORDER_COLOR_BLUE:
	case TD_PS_BORDER_COLOR_ALPHA:
	case TD_VS_BORDER_COLOR_INDEX:
	case TD_VS_BORDER_COLOR_RED:
	case TD_VS_BORDER_COLOR_GREEN:
	case TD_VS_BORDER_COLOR_BLUE:
	case TD_VS_BORDER_COLOR_ALPHA:
	case TD_GS_BORDER_COLOR_INDEX:
	case TD_GS_BORDER_COLOR_RED:
	case TD_GS_BORDER_COLOR_GREEN:
	case TD_GS_BORDER_COLOR_BLUE:
	case TD_GS_BORDER_COLOR_ALPHA:
	case TD_HS_BORDER_COLOR_INDEX:
	case TD_HS_BORDER_COLOR_RED:
	case TD_HS_BORDER_COLOR_GREEN:
	case TD_HS_BORDER_COLOR_BLUE:
	case TD_HS_BORDER_COLOR_ALPHA:
	case TD_LS_BORDER_COLOR_INDEX:
	case TD_LS_BORDER_COLOR_RED:
	case TD_LS_BORDER_COLOR_GREEN:
	case TD_LS_BORDER_COLOR_BLUE:
	case TD_LS_BORDER_COLOR_ALPHA:
	case TD_CS_BORDER_COLOR_INDEX:
	case TD_CS_BORDER_COLOR_RED:
	case TD_CS_BORDER_COLOR_GREEN:
	case TD_CS_BORDER_COLOR_BLUE:
	case TD_CS_BORDER_COLOR_ALPHA:
	case SQ_ESGS_RING_SIZE:
	case SQ_GSVS_RING_SIZE:
	case SQ_ESTMP_RING_SIZE:
	case SQ_GSTMP_RING_SIZE:
	case SQ_HSTMP_RING_SIZE:
	case SQ_LSTMP_RING_SIZE:
	case SQ_PSTMP_RING_SIZE:
	case SQ_VSTMP_RING_SIZE:
	case SQ_ESGS_RING_ITEMSIZE:
	case SQ_ESTMP_RING_ITEMSIZE:
	case SQ_GSTMP_RING_ITEMSIZE:
	case SQ_GSVS_RING_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE_1:
	case SQ_GS_VERT_ITEMSIZE_2:
	case SQ_GS_VERT_ITEMSIZE_3:
	case SQ_GSVS_RING_OFFSET_1:
	case SQ_GSVS_RING_OFFSET_2:
	case SQ_GSVS_RING_OFFSET_3:
	case SQ_HSTMP_RING_ITEMSIZE:
	case SQ_LSTMP_RING_ITEMSIZE:
	case SQ_PSTMP_RING_ITEMSIZE:
	case SQ_VSTMP_RING_ITEMSIZE:
	case VGT_TF_RING_SIZE:
	case SQ_ESGS_RING_BASE:
	case SQ_GSVS_RING_BASE:
	case SQ_ESTMP_RING_BASE:
	case SQ_GSTMP_RING_BASE:
	case SQ_HSTMP_RING_BASE:
	case SQ_LSTMP_RING_BASE:
	case SQ_PSTMP_RING_BASE:
	case SQ_VSTMP_RING_BASE:
	case CAYMAN_VGT_OFFCHIP_LDS_BASE:
	case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
		return true;
	default:
		DRM_ERROR("Invalid register 0x%x in CS\n", reg);
		return false;
	}
}
static int evergreen_vm_packet3_check(struct radeon_device *rdev,
				      u32 *ib, struct radeon_cs_packet *pkt)
{
	u32 idx = pkt->idx + 1;
	u32 idx_value = ib[idx];
	u32 start_reg, end_reg, reg, i;
	u32 command, info;

	switch (pkt->opcode) {
	case PACKET3_NOP:
	case PACKET3_SET_BASE:
	case PACKET3_CLEAR_STATE:
	case PACKET3_INDEX_BUFFER_SIZE:
	case PACKET3_DISPATCH_DIRECT:
	case PACKET3_DISPATCH_INDIRECT:
	case PACKET3_MODE_CONTROL:
	case PACKET3_SET_PREDICATION:
	case PACKET3_COND_EXEC:
	case PACKET3_PRED_EXEC:
	case PACKET3_DRAW_INDIRECT:
	case PACKET3_DRAW_INDEX_INDIRECT:
	case PACKET3_INDEX_BASE:
	case PACKET3_DRAW_INDEX_2:
	case PACKET3_CONTEXT_CONTROL:
	case PACKET3_DRAW_INDEX_OFFSET:
	case PACKET3_INDEX_TYPE:
	case PACKET3_DRAW_INDEX:
	case PACKET3_DRAW_INDEX_AUTO:
	case PACKET3_DRAW_INDEX_IMMD:
	case PACKET3_NUM_INSTANCES:
	case PACKET3_DRAW_INDEX_MULTI_AUTO:
	case PACKET3_STRMOUT_BUFFER_UPDATE:
	case PACKET3_DRAW_INDEX_OFFSET_2:
	case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
	case PACKET3_MPEG_INDEX:
	case PACKET3_WAIT_REG_MEM:
	case PACKET3_MEM_WRITE:
	case PACKET3_SURFACE_SYNC:
	case PACKET3_EVENT_WRITE:
	case PACKET3_EVENT_WRITE_EOP:
	case PACKET3_EVENT_WRITE_EOS:
	case PACKET3_SET_CONTEXT_REG:
	case PACKET3_SET_BOOL_CONST:
	case PACKET3_SET_LOOP_CONST:
	case PACKET3_SET_RESOURCE:
	case PACKET3_SET_SAMPLER:
	case PACKET3_SET_CTL_CONST:
	case PACKET3_SET_RESOURCE_OFFSET:
	case PACKET3_SET_CONTEXT_REG_INDIRECT:
	case PACKET3_SET_RESOURCE_INDIRECT:
	case CAYMAN_PACKET3_DEALLOC_STATE:
		break;
	case PACKET3_COND_WRITE:
		if (idx_value & 0x100) {
			reg = ib[idx + 5] * 4;
			if (!evergreen_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_COPY_DW:
		if (idx_value & 0x2) {
			reg = ib[idx + 3] * 4;
			if (!evergreen_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			if (!evergreen_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_CP_DMA:
		command = ib[idx + 4];
		info = ib[idx + 1];
		if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
		    (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
		    ((((info & 0x00300000) >> 20) == 0) &&
		     (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
		    ((((info & 0x60000000) >> 29) == 0) &&
		     (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
			/* non mem-to-mem copies require a dw aligned count */
			if ((command & 0x1fffff) % 4) {
				DRM_ERROR("CP DMA command requires dw count alignment\n");
				return -EINVAL;
			}
		}
		if (command & PACKET3_CP_DMA_CMD_SAS) {
			/* src address space is register */
			if (((info & 0x60000000) >> 29) == 0) {
				start_reg = idx_value << 2;
				if (command & PACKET3_CP_DMA_CMD_SAIC) {
					reg = start_reg;
					if (!evergreen_vm_reg_valid(reg)) {
						DRM_ERROR("CP DMA Bad SRC register\n");
						return -EINVAL;
					}
				} else {
					for (i = 0; i < (command & 0x1fffff); i++) {
						reg = start_reg + (4 * i);
						if (!evergreen_vm_reg_valid(reg)) {
							DRM_ERROR("CP DMA Bad SRC register\n");
							return -EINVAL;
						}
					}
				}
			}
		}
		if (command & PACKET3_CP_DMA_CMD_DAS) {
			/* dst address space is register */
			if (((info & 0x00300000) >> 20) == 0) {
				start_reg = ib[idx + 2];
				if (command & PACKET3_CP_DMA_CMD_DAIC) {
					reg = start_reg;
					if (!evergreen_vm_reg_valid(reg)) {
						DRM_ERROR("CP DMA Bad DST register\n");
						return -EINVAL;
					}
				} else {
					for (i = 0; i < (command & 0x1fffff); i++) {
						reg = start_reg + (4 * i);
						if (!evergreen_vm_reg_valid(reg)) {
							DRM_ERROR("CP DMA Bad DST register\n");
							return -EINVAL;
						}
					}
				}
			}
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
{
	int ret = 0;
	u32 idx = 0;
	struct radeon_cs_packet pkt;

	do {
		pkt.idx = idx;
		pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
		pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
		pkt.one_reg_wr = 0;
		switch (pkt.type) {
		case RADEON_PACKET_TYPE0:
			dev_err(rdev->dev, "Packet0 not allowed!\n");
			ret = -EINVAL;
			break;
		case RADEON_PACKET_TYPE2:
			idx += 1;
			break;
		case RADEON_PACKET_TYPE3:
			pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
			ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
			idx += pkt.count + 2;
			break;
		default:
			dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
			ret = -EINVAL;
			break;
		}
		if (ret)
			break;
	} while (idx < ib->length_dw);

	return ret;
}
/**
 * evergreen_dma_ib_parse() - parse the DMA IB for VM
 * @rdev: radeon_device pointer
 * @ib:	radeon_ib pointer
 *
 * Parses the DMA IB from the VM CS ioctl and
 * checks for errors. (Cayman-SI)
 * Returns 0 for success and an error on failure.
 **/
int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
{
	u32 idx = 0;
	u32 header, cmd, count, sub_cmd;

	do {
		header = ib->ptr[idx];
		cmd = GET_DMA_CMD(header);
		count = GET_DMA_COUNT(header);
		sub_cmd = GET_DMA_SUB_CMD(header);
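		/* the VM path carries real GPU virtual addresses, so only
		 * the packet sizes need checking to walk the IB safely */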
		switch (cmd) {
		case DMA_PACKET_WRITE:
			switch (sub_cmd) {
			/* tiled */
			case 8:
				idx += count + 7;
				break;
			/* linear */
			case 0:
				idx += count + 3;
				break;
			default:
				DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, ib->ptr[idx]);
				return -EINVAL;
			}
			break;
		case DMA_PACKET_COPY:
			switch (sub_cmd) {
			/* Copy L2L, DW aligned */
			case 0x00:
				idx += 5;
				break;
			/* Copy L2T/T2L */
			case 0x08:
				idx += 9;
				break;
			/* Copy L2L, byte aligned */
			case 0x40:
				idx += 5;
				break;
			/* Copy L2L, partial */
			case 0x41:
				idx += 9;
				break;
			/* Copy L2L, DW aligned, broadcast */
			case 0x44:
				idx += 7;
				break;
			/* Copy L2T Frame to Field */
			case 0x48:
				idx += 10;
				break;
			/* Copy L2T/T2L, partial */
			case 0x49:
				idx += 12;
				break;
			/* Copy L2T broadcast */
			case 0x4b:
				idx += 10;
				break;
			/* Copy L2T/T2L (tile units) */
			case 0x4c:
				idx += 9;
				break;
			/* Copy T2T, partial (tile units) */
			case 0x4d:
				idx += 13;
				break;
			/* Copy L2T broadcast (tile units) */
			case 0x4f:
				idx += 10;
				break;
			default:
				DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, ib->ptr[idx]);
				return -EINVAL;
			}
			break;
		case DMA_PACKET_CONSTANT_FILL:
			idx += 4;
			break;
		case DMA_PACKET_NOP:
			idx += 1;
			break;
		default:
			DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
			return -EINVAL;
		}
	} while (idx < ib->length_dw);