/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Brad Volkin <bradley.d.volkin@intel.com>
 *
 */

#include "i915_drv.h"
/**
 * DOC: batch buffer command parser
 *
 * Motivation:
 * Certain OpenGL features (e.g. transform feedback, performance monitoring)
 * require userspace code to submit batches containing commands such as
 * MI_LOAD_REGISTER_IMM to access various registers. Unfortunately, some
 * generations of the hardware will noop these commands in "unsecure" batches
 * (which includes all userspace batches submitted via i915) even though the
 * commands may be safe and represent the intended programming model of the
 * hardware.
 *
 * The software command parser is similar in operation to the command parsing
 * done in hardware for unsecure batches. However, the software parser allows
 * some operations that would be noop'd by hardware, if the parser determines
 * the operation is safe, and submits the batch as "secure" to prevent hardware
 * parsing.
 *
 * Threats:
 * At a high level, the hardware (and software) checks attempt to prevent
 * granting userspace undue privileges. There are three categories of privilege.
 *
 * First, commands which are explicitly defined as privileged or which should
 * only be used by the kernel driver. The parser generally rejects such
 * commands, though it may allow some from the drm master process.
 *
 * Second, commands which access registers. To support correct/enhanced
 * userspace functionality, particularly certain OpenGL extensions, the parser
 * provides a whitelist of registers which userspace may safely access (for both
 * normal and drm master processes).
 *
 * Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
 * The parser always rejects such commands.
 *
 * The majority of the problematic commands fall in the MI_* range, with only a
 * few specific commands on each ring (e.g. PIPE_CONTROL and MI_FLUSH_DW).
 *
 * Implementation:
 * Each ring maintains tables of commands and registers which the parser uses in
 * scanning batch buffers submitted to that ring.
 *
 * Since the set of commands that the parser must check for is significantly
 * smaller than the number of commands supported, the parser tables contain only
 * those commands required by the parser. This generally works because command
 * opcode ranges have standard command length encodings. So for commands that
 * the parser does not need to check, it can easily skip them. This is
 * implemented via a per-ring length decoding vfunc (see the illustrative
 * sketch following this comment).
 *
 * Unfortunately, there are a number of commands that do not follow the standard
 * length encoding for their opcode range, primarily amongst the MI_* commands.
 * To handle this, the parser provides a way to define explicit "skip" entries
 * in the per-ring command tables.
 *
 * Other command table entries map fairly directly to the high level categories
 * mentioned above: rejected, master-only, register whitelist. The parser
 * implements a number of checks, including the privileged memory checks, via a
 * general bitmasking mechanism.
 */
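/*
 * Illustrative sketch only, not used by the driver: how the parser skips a
 * command it does not need to check. The length mask is what a per-ring
 * length decoding vfunc (e.g. gen7_render_get_cmd_length_mask() below)
 * returns for the command's opcode range; the bias of 2 matches LENGTH_BIAS
 * defined further down.
 */
static inline const u32 *
example_skip_unchecked_cmd(const u32 *cmd, u32 length_mask)
{
	/* total size in dwords = biased length field + 2 */
	return cmd + (*cmd & length_mask) + 2;
}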
#define STD_MI_OPCODE_MASK  0xFF800000
#define STD_3D_OPCODE_MASK  0xFFFF0000
#define STD_2D_OPCODE_MASK  0xFFC00000
#define STD_MFX_OPCODE_MASK 0xFFFF0000

#define CMD(op, opm, f, lm, fl, ...)			\
	{						\
		.flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \
		.cmd = { (op), (opm) },			\
		.length = { (lm) },			\
		__VA_ARGS__				\
	},
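/*
 * For illustration, CMD( MI_NOOP, SMI, F, 1, S ) in the tables below
 * expands to the descriptor initializer
 *
 *	{
 *		.flags = CMD_DESC_SKIP | CMD_DESC_FIXED,
 *		.cmd = { MI_NOOP, STD_MI_OPCODE_MASK },
 *		.length = { 1 },
 *	},
 *
 * i.e. a fixed-length, one-dword command that the parser may skip.
 */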
/* Convenience macros to compress the tables */
#define SMI STD_MI_OPCODE_MASK
#define S3D STD_3D_OPCODE_MASK
#define S2D STD_2D_OPCODE_MASK
#define SMFX STD_MFX_OPCODE_MASK
#define F true
#define S CMD_DESC_SKIP
#define R CMD_DESC_REJECT
#define W CMD_DESC_REGISTER
#define B CMD_DESC_BITMASK
#define M CMD_DESC_MASTER

/*            Command                          Mask   Fixed Len   Action
	      ---------------------------------------------------------- */
static const struct drm_i915_cmd_descriptor common_cmds[] = {
	CMD(  MI_NOOP,                          SMI,    F,  1,      S  ),
	CMD(  MI_USER_INTERRUPT,                SMI,    F,  1,      R  ),
	CMD(  MI_WAIT_FOR_EVENT,                SMI,    F,  1,      M  ),
	CMD(  MI_ARB_CHECK,                     SMI,    F,  1,      S  ),
	CMD(  MI_REPORT_HEAD,                   SMI,    F,  1,      S  ),
	CMD(  MI_SUSPEND_FLUSH,                 SMI,    F,  1,      S  ),
	CMD(  MI_SEMAPHORE_MBOX,                SMI,   !F,  0xFF,   R  ),
	CMD(  MI_STORE_DWORD_INDEX,             SMI,   !F,  0xFF,   R  ),
	CMD(  MI_LOAD_REGISTER_IMM(1),          SMI,   !F,  0xFF,   W,
	      .reg = { .offset = 1, .mask = 0x007FFFFC }               ),
	CMD(  MI_STORE_REGISTER_MEM(1),         SMI,   !F,  0xFF,   W | B,
	      .reg = { .offset = 1, .mask = 0x007FFFFC },
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_LOAD_REGISTER_MEM,             SMI,   !F,  0xFF,   W | B,
	      .reg = { .offset = 1, .mask = 0x007FFFFC },
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_BATCH_BUFFER_START,            SMI,   !F,  0xFF,   S  ),
};
static const struct drm_i915_cmd_descriptor render_cmds[] = {
	CMD(  MI_FLUSH,                         SMI,    F,  1,      S  ),
	CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
	CMD(  MI_PREDICATE,                     SMI,    F,  1,      S  ),
	CMD(  MI_TOPOLOGY_FILTER,               SMI,    F,  1,      S  ),
	CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
	CMD(  MI_SET_CONTEXT,                   SMI,   !F,  0xFF,   R  ),
	CMD(  MI_URB_CLEAR,                     SMI,   !F,  0xFF,   S  ),
	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3F,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0xFF,   R  ),
	CMD(  MI_CLFLUSH,                       SMI,   !F,  0x3FF,  B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_REPORT_PERF_COUNT,             SMI,   !F,  0x3F,   B,
	      .bits = {{
			.offset = 1,
			.mask = MI_REPORT_PERF_COUNT_GGTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  GFX_OP_3DSTATE_VF_STATISTICS,     S3D,    F,  1,      S  ),
	CMD(  PIPELINE_SELECT,                  S3D,    F,  1,      S  ),
	CMD(  MEDIA_VFE_STATE,                  S3D,   !F,  0xFFFF, B,
	      .bits = {{
			.offset = 2,
			.mask = MEDIA_VFE_STATE_MMIO_ACCESS_MASK,
			.expected = 0,
	      }},						       ),
	CMD(  GPGPU_OBJECT,                     S3D,   !F,  0xFF,   S  ),
	CMD(  GPGPU_WALKER,                     S3D,   !F,  0xFF,   S  ),
	CMD(  GFX_OP_3DSTATE_SO_DECL_LIST,      S3D,   !F,  0x1FF,  S  ),
	CMD(  GFX_OP_PIPE_CONTROL(5),           S3D,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 1,
			.mask = (PIPE_CONTROL_MMIO_WRITE | PIPE_CONTROL_NOTIFY),
			.expected = 0,
	      },
	      {
			.offset = 1,
			.mask = (PIPE_CONTROL_GLOBAL_GTT_IVB |
				 PIPE_CONTROL_STORE_DATA_INDEX),
			.expected = 0,
			.condition_offset = 1,
			.condition_mask = PIPE_CONTROL_POST_SYNC_OP_MASK,
	      }},						       ),
};
static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
	CMD(  MI_SET_PREDICATE,                 SMI,    F,  1,      S  ),
	CMD(  MI_RS_CONTROL,                    SMI,    F,  1,      S  ),
	CMD(  MI_URB_ATOMIC_ALLOC,              SMI,    F,  1,      S  ),
	CMD(  MI_RS_CONTEXT,                    SMI,    F,  1,      S  ),
	CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   M  ),
	CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
	CMD(  MI_LOAD_REGISTER_REG,             SMI,   !F,  0xFF,   R  ),
	CMD(  MI_RS_STORE_DATA_IMM,             SMI,   !F,  0xFF,   S  ),
	CMD(  MI_LOAD_URB_MEM,                  SMI,   !F,  0xFF,   S  ),
	CMD(  MI_STORE_URB_MEM,                 SMI,   !F,  0xFF,   S  ),
	CMD(  GFX_OP_3DSTATE_DX9_CONSTANTF_VS,  S3D,   !F,  0x7FF,  S  ),
	CMD(  GFX_OP_3DSTATE_DX9_CONSTANTF_PS,  S3D,   !F,  0x7FF,  S  ),

	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS, S3D,   !F,  0x1FF,  S  ),
	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS, S3D,   !F,  0x1FF,  S  ),
	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS, S3D,   !F,  0x1FF,  S  ),
	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS, S3D,   !F,  0x1FF,  S  ),
	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS, S3D,   !F,  0x1FF,  S  ),
};
static const struct drm_i915_cmd_descriptor video_cmds[] = {
	CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
	CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_FLUSH_DW_NOTIFY,
			.expected = 0,
	      },
	      {
			.offset = 1,
			.mask = MI_FLUSH_DW_USE_GTT,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      },
	      {
			.offset = 0,
			.mask = MI_FLUSH_DW_STORE_INDEX,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      }},						       ),
	CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	/*
	 * MFX_WAIT doesn't fit the way we handle length for most commands.
	 * It has a length field but it uses a non-standard length bias.
	 * It is always 1 dword though, so just treat it as fixed length.
	 */
	CMD(  MFX_WAIT,                         SMFX,   F,  1,      S  ),
};
static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
	CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
	CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_FLUSH_DW_NOTIFY,
			.expected = 0,
	      },
	      {
			.offset = 1,
			.mask = MI_FLUSH_DW_USE_GTT,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      },
	      {
			.offset = 0,
			.mask = MI_FLUSH_DW_STORE_INDEX,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      }},						       ),
	CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
};
static const struct drm_i915_cmd_descriptor blt_cmds[] = {
	CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3FF,  B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
	CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_FLUSH_DW_NOTIFY,
			.expected = 0,
	      },
	      {
			.offset = 1,
			.mask = MI_FLUSH_DW_USE_GTT,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      },
	      {
			.offset = 0,
			.mask = MI_FLUSH_DW_STORE_INDEX,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      }},						       ),
	CMD(  COLOR_BLT,                        S2D,   !F,  0x3F,   S  ),
	CMD(  SRC_COPY_BLT,                     S2D,   !F,  0x3F,   S  ),
};
static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
	CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   M  ),
	CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
};

#undef CMD
#undef SMI
#undef S3D
#undef S2D
#undef SMFX
#undef F
#undef S
#undef R
#undef W
#undef B
#undef M
static const struct drm_i915_cmd_table gen7_render_cmds[] = {
	{ common_cmds, ARRAY_SIZE(common_cmds) },
	{ render_cmds, ARRAY_SIZE(render_cmds) },
};

static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = {
	{ common_cmds, ARRAY_SIZE(common_cmds) },
	{ render_cmds, ARRAY_SIZE(render_cmds) },
	{ hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
};

static const struct drm_i915_cmd_table gen7_video_cmds[] = {
	{ common_cmds, ARRAY_SIZE(common_cmds) },
	{ video_cmds, ARRAY_SIZE(video_cmds) },
};

static const struct drm_i915_cmd_table hsw_vebox_cmds[] = {
	{ common_cmds, ARRAY_SIZE(common_cmds) },
	{ vecs_cmds, ARRAY_SIZE(vecs_cmds) },
};

static const struct drm_i915_cmd_table gen7_blt_cmds[] = {
	{ common_cmds, ARRAY_SIZE(common_cmds) },
	{ blt_cmds, ARRAY_SIZE(blt_cmds) },
};

static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
	{ common_cmds, ARRAY_SIZE(common_cmds) },
	{ blt_cmds, ARRAY_SIZE(blt_cmds) },
	{ hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
};
/*
 * Register whitelists, sorted by increasing register offset.
 *
 * Some registers that userspace accesses are 64 bits. The register
 * access commands only allow 32-bit accesses. Hence, we have to include
 * entries for both halves of the 64-bit registers.
 */

/* Convenience macro for adding 64-bit registers */
#define REG64(addr) (addr), (addr + sizeof(u32))
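/*
 * For example, REG64(HS_INVOCATION_COUNT) below expands to the two
 * consecutive u32 entries
 *
 *	(HS_INVOCATION_COUNT), (HS_INVOCATION_COUNT + sizeof(u32))
 *
 * whitelisting the lower and upper halves of the 64-bit counter.
 */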
static const u32 gen7_render_regs[] = {
	REG64(HS_INVOCATION_COUNT),
	REG64(DS_INVOCATION_COUNT),
	REG64(IA_VERTICES_COUNT),
	REG64(IA_PRIMITIVES_COUNT),
	REG64(VS_INVOCATION_COUNT),
	REG64(GS_INVOCATION_COUNT),
	REG64(GS_PRIMITIVES_COUNT),
	REG64(CL_INVOCATION_COUNT),
	REG64(CL_PRIMITIVES_COUNT),
	REG64(PS_INVOCATION_COUNT),
	REG64(PS_DEPTH_COUNT),
	OACONTROL, /* Only allowed for LRI and SRM. See below. */
	GEN7_3DPRIM_END_OFFSET,
	GEN7_3DPRIM_START_VERTEX,
	GEN7_3DPRIM_VERTEX_COUNT,
	GEN7_3DPRIM_INSTANCE_COUNT,
	GEN7_3DPRIM_START_INSTANCE,
	GEN7_3DPRIM_BASE_VERTEX,
	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(0)),
	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(1)),
	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(2)),
	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(3)),
	REG64(GEN7_SO_PRIM_STORAGE_NEEDED(0)),
	REG64(GEN7_SO_PRIM_STORAGE_NEEDED(1)),
	REG64(GEN7_SO_PRIM_STORAGE_NEEDED(2)),
	REG64(GEN7_SO_PRIM_STORAGE_NEEDED(3)),
	GEN7_SO_WRITE_OFFSET(0),
	GEN7_SO_WRITE_OFFSET(1),
	GEN7_SO_WRITE_OFFSET(2),
	GEN7_SO_WRITE_OFFSET(3),
	GEN7_L3SQCREG1,
	GEN7_L3CNTLREG2,
	GEN7_L3CNTLREG3,
};
static const u32 gen7_blt_regs[] = {
	BCS_SWCTRL,
};
static const u32 ivb_master_regs[] = {
	FORCEWAKE_MT,
	DERRMR,
	GEN7_PIPE_DE_LOAD_SL(PIPE_A),
	GEN7_PIPE_DE_LOAD_SL(PIPE_B),
	GEN7_PIPE_DE_LOAD_SL(PIPE_C),
};
static const u32 hsw_master_regs[] = {
	FORCEWAKE_MT,
	DERRMR,
};

#undef REG64
static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
{
	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
	u32 subclient =
		(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;

	if (client == INSTR_MI_CLIENT)
		return 0x3F;
	else if (client == INSTR_RC_CLIENT) {
		if (subclient == INSTR_MEDIA_SUBCLIENT)
			return 0xFFFF;
		else
			return 0xFF;
	}

	DRM_DEBUG_DRIVER("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header);
	return 0;
}
static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
{
	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
	u32 subclient =
		(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;

	if (client == INSTR_MI_CLIENT)
		return 0x3F;
	else if (client == INSTR_RC_CLIENT) {
		if (subclient == INSTR_MEDIA_SUBCLIENT)
			return 0xFFF;
		else
			return 0xFF;
	}

	DRM_DEBUG_DRIVER("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header);
	return 0;
}
static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
{
	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;

	if (client == INSTR_MI_CLIENT)
		return 0x3F;
	else if (client == INSTR_BC_CLIENT)
		return 0xFF;

	DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
	return 0;
}
static bool validate_cmds_sorted(struct intel_engine_cs *ring,
				 const struct drm_i915_cmd_table *cmd_tables,
				 int cmd_table_count)
{
	int i;
	bool ret = true;

	if (!cmd_tables || cmd_table_count == 0)
		return true;

	for (i = 0; i < cmd_table_count; i++) {
		const struct drm_i915_cmd_table *table = &cmd_tables[i];
		u32 previous = 0;
		int j;

		for (j = 0; j < table->count; j++) {
			const struct drm_i915_cmd_descriptor *desc =
				&table->table[j];
			u32 curr = desc->cmd.value & desc->cmd.mask;

			if (curr < previous) {
				DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
					  ring->id, i, j, curr, previous);
				ret = false;
			}

			previous = curr;
		}
	}

	return ret;
}
static bool check_sorted(int ring_id, const u32 *reg_table, int reg_count)
{
	int i;
	u32 previous = 0;
	bool ret = true;

	for (i = 0; i < reg_count; i++) {
		u32 curr = reg_table[i];

		if (curr < previous) {
			DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
				  ring_id, i, curr, previous);
			ret = false;
		}

		previous = curr;
	}

	return ret;
}
static bool validate_regs_sorted(struct intel_engine_cs *ring)
{
	return check_sorted(ring->id, ring->reg_table, ring->reg_count) &&
		check_sorted(ring->id, ring->master_reg_table,
			     ring->master_reg_count);
}
struct cmd_node {
	const struct drm_i915_cmd_descriptor *desc;
	struct hlist_node node;
};
/*
 * Different command ranges have different numbers of bits for the opcode. For
 * example, MI commands use bits 31:23 while 3D commands use bits 31:16. The
 * problem is that, for example, MI commands use bits 22:16 for other fields
 * such as GGTT vs PPGTT bits. If we include those bits in the mask then when
 * we mask a command from a batch it could hash to the wrong bucket due to
 * non-opcode bits being set. But if we don't include those bits, some 3D
 * commands may hash to the same bucket due to not including opcode bits that
 * make the command unique. For now, we will risk hashing to the same bucket.
 *
 * If we attempt to generate a perfect hash, we should be able to look at bits
 * 31:29 of a command from a batch buffer and use the full mask for that
 * client. The existing INSTR_CLIENT_MASK/SHIFT defines can be used for this.
 */
#define CMD_HASH_MASK STD_MI_OPCODE_MASK
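/*
 * For illustration: two hypothetical 3D headers 0x7B000000 and 0x7B170000
 * differ only in bits 22:16, so masking both with CMD_HASH_MASK (bits 31:23)
 * yields 0x7B000000 and they land in the same bucket. find_cmd_in_table()
 * below disambiguates by re-applying each descriptor's full opcode mask
 * while walking the bucket.
 */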
static int init_hash_table(struct intel_engine_cs *ring,
			   const struct drm_i915_cmd_table *cmd_tables,
			   int cmd_table_count)
{
	int i, j;

	hash_init(ring->cmd_hash);

	for (i = 0; i < cmd_table_count; i++) {
		const struct drm_i915_cmd_table *table = &cmd_tables[i];

		for (j = 0; j < table->count; j++) {
			const struct drm_i915_cmd_descriptor *desc =
				&table->table[j];
			struct cmd_node *desc_node =
				kmalloc(sizeof(*desc_node), GFP_KERNEL);

			if (!desc_node)
				return -ENOMEM;

			desc_node->desc = desc;
			hash_add(ring->cmd_hash, &desc_node->node,
				 desc->cmd.value & CMD_HASH_MASK);
		}
	}

	return 0;
}
static void fini_hash_table(struct intel_engine_cs *ring)
{
	struct hlist_node *tmp;
	struct cmd_node *desc_node;
	int i;

	hash_for_each_safe(ring->cmd_hash, i, tmp, desc_node, node) {
		hash_del(&desc_node->node);
		kfree(desc_node);
	}
}
/**
 * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
 * @ring: the ringbuffer to initialize
 *
 * Optionally initializes fields related to batch buffer command parsing in the
 * struct intel_engine_cs based on whether the platform requires software
 * command parsing.
 *
 * Return: non-zero if initialization fails
 */
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
{
	const struct drm_i915_cmd_table *cmd_tables;
	int cmd_table_count;
	int ret;

	if (!IS_GEN7(ring->dev))
		return 0;

	switch (ring->id) {
	case RCS:
		if (IS_HASWELL(ring->dev)) {
			cmd_tables = hsw_render_ring_cmds;
			cmd_table_count =
				ARRAY_SIZE(hsw_render_ring_cmds);
		} else {
			cmd_tables = gen7_render_cmds;
			cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
		}

		ring->reg_table = gen7_render_regs;
		ring->reg_count = ARRAY_SIZE(gen7_render_regs);

		if (IS_HASWELL(ring->dev)) {
			ring->master_reg_table = hsw_master_regs;
			ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
		} else {
			ring->master_reg_table = ivb_master_regs;
			ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
		}

		ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
		break;
	case VCS:
		cmd_tables = gen7_video_cmds;
		cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
		ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
		break;
	case BCS:
		if (IS_HASWELL(ring->dev)) {
			cmd_tables = hsw_blt_ring_cmds;
			cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
		} else {
			cmd_tables = gen7_blt_cmds;
			cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
		}

		ring->reg_table = gen7_blt_regs;
		ring->reg_count = ARRAY_SIZE(gen7_blt_regs);

		if (IS_HASWELL(ring->dev)) {
			ring->master_reg_table = hsw_master_regs;
			ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
		} else {
			ring->master_reg_table = ivb_master_regs;
			ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
		}

		ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
		break;
	case VECS:
		cmd_tables = hsw_vebox_cmds;
		cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
		/* VECS can use the same length_mask function as VCS */
		ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
		break;
	default:
		DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
			  ring->id);
		BUG();
	}

	BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
	BUG_ON(!validate_regs_sorted(ring));

	if (hash_empty(ring->cmd_hash)) {
		ret = init_hash_table(ring, cmd_tables, cmd_table_count);
		if (ret) {
			DRM_ERROR("CMD: cmd_parser_init failed!\n");
			fini_hash_table(ring);
			return ret;
		}
	}

	ring->needs_cmd_parser = true;

	return 0;
}
/**
 * i915_cmd_parser_fini_ring() - clean up cmd parser related fields
 * @ring: the ringbuffer to clean up
 *
 * Releases any resources related to command parsing that may have been
 * initialized for the specified ring.
 */
void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
{
	if (!ring->needs_cmd_parser)
		return;

	fini_hash_table(ring);
}
static const struct drm_i915_cmd_descriptor*
find_cmd_in_table(struct intel_engine_cs *ring,
		  u32 cmd_header)
{
	struct cmd_node *desc_node;

	hash_for_each_possible(ring->cmd_hash, desc_node, node,
			       cmd_header & CMD_HASH_MASK) {
		const struct drm_i915_cmd_descriptor *desc = desc_node->desc;
		u32 masked_cmd = desc->cmd.mask & cmd_header;
		u32 masked_value = desc->cmd.value & desc->cmd.mask;

		if (masked_cmd == masked_value)
			return desc;
	}

	return NULL;
}
/*
 * Returns a pointer to a descriptor for the command specified by cmd_header.
 *
 * The caller must supply space for a default descriptor via the default_desc
 * parameter. If no descriptor for the specified command exists in the ring's
 * command parser tables, this function fills in default_desc based on the
 * ring's default length encoding and returns default_desc.
 */
static const struct drm_i915_cmd_descriptor*
find_cmd(struct intel_engine_cs *ring,
	 u32 cmd_header,
	 struct drm_i915_cmd_descriptor *default_desc)
{
	const struct drm_i915_cmd_descriptor *desc;
	u32 mask;

	desc = find_cmd_in_table(ring, cmd_header);
	if (desc)
		return desc;

	mask = ring->get_cmd_length_mask(cmd_header);
	if (!mask)
		return NULL;

	BUG_ON(!default_desc);
	default_desc->flags = CMD_DESC_SKIP;
	default_desc->length.mask = mask;

	return default_desc;
}
static bool valid_reg(const u32 *table, int count, u32 addr)
{
	if (table && count != 0) {
		int i;

		for (i = 0; i < count; i++) {
			if (table[i] == addr)
				return true;
		}
	}

	return false;
}
static u32 *vmap_batch(struct drm_i915_gem_object *obj)
{
	int i;
	void *addr = NULL;
	struct sg_page_iter sg_iter;
	struct page **pages;

	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
	if (pages == NULL) {
		DRM_DEBUG_DRIVER("Failed to get space for pages\n");
		goto finish;
	}

	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		pages[i] = sg_page_iter_page(&sg_iter);
		i++;
	}

	addr = vmap(pages, i, 0, PAGE_KERNEL);
	if (addr == NULL) {
		DRM_DEBUG_DRIVER("Failed to vmap pages\n");
		goto finish;
	}

finish:
	if (pages)
		drm_free_large(pages);
	return (u32*)addr;
}
/**
 * i915_needs_cmd_parser() - should a given ring use software command parsing?
 * @ring: the ring in question
 *
 * Only certain platforms require software batch buffer command parsing, and
 * only when enabled via module parameter.
 *
 * Return: true if the ring requires software command parsing
 */
bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
{
	if (!ring->needs_cmd_parser)
		return false;

	if (!USES_PPGTT(ring->dev))
		return false;

	return (i915.enable_cmd_parser == 1);
}
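/*
 * Sketch of the expected call flow (simplified, hypothetical snippet; the
 * real logic lives in the execbuffer path, i915_gem_execbuffer.c):
 *
 *	if (i915_needs_cmd_parser(ring)) {
 *		ret = i915_parse_cmds(ring, batch_obj,
 *				      args->batch_start_offset,
 *				      file->is_master);
 *		if (ret)
 *			goto err;
 *	}
 *
 * i.e. parsing is gated on the per-ring flag and module parameter first,
 * and any violation aborts the submission.
 */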
static bool check_cmd(const struct intel_engine_cs *ring,
		      const struct drm_i915_cmd_descriptor *desc,
		      const u32 *cmd,
		      const bool is_master,
		      bool *oacontrol_set)
{
	if (desc->flags & CMD_DESC_REJECT) {
		DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
		return false;
	}

	if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
		DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
				 *cmd);
		return false;
	}

	if (desc->flags & CMD_DESC_REGISTER) {
		u32 reg_addr = cmd[desc->reg.offset] & desc->reg.mask;

		/*
		 * OACONTROL requires some special handling for writes. We
		 * want to make sure that any batch which enables OA also
		 * disables it before the end of the batch. The goal is to
		 * prevent one process from snooping on the perf data from
		 * another process. To do that, we need to check the value
		 * that will be written to the register. Hence, limit
		 * OACONTROL writes to only MI_LOAD_REGISTER_IMM commands.
		 */
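		/*
		 * Illustration (hypothetical batch fragment): a well-behaved
		 * batch pairs
		 *
		 *	MI_LOAD_REGISTER_IMM(1), OACONTROL, <enable value>
		 *
		 * with a matching
		 *
		 *	MI_LOAD_REGISTER_IMM(1), OACONTROL, 0
		 *
		 * before its MI_BATCH_BUFFER_END, so the oacontrol_set flag
		 * checked at the end of i915_parse_cmds() is false again by
		 * the time the batch ends.
		 */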
		if (reg_addr == OACONTROL) {
			if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
				DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
				return false;
			}

			if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
				*oacontrol_set = (cmd[2] != 0);
		}

		if (!valid_reg(ring->reg_table,
			       ring->reg_count, reg_addr)) {
			if (!is_master ||
			    !valid_reg(ring->master_reg_table,
				       ring->master_reg_count,
				       reg_addr)) {
				DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
						 reg_addr,
						 *cmd,
						 ring->id);
				return false;
			}
		}
	}

	if (desc->flags & CMD_DESC_BITMASK) {
		int i;

		for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) {
			u32 dword;

			if (desc->bits[i].mask == 0)
				break;

			if (desc->bits[i].condition_mask != 0) {
				u32 offset =
					desc->bits[i].condition_offset;
				u32 condition = cmd[offset] &
					desc->bits[i].condition_mask;

				if (condition == 0)
					continue;
			}

			dword = cmd[desc->bits[i].offset] &
				desc->bits[i].mask;

			if (dword != desc->bits[i].expected) {
				DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
						 *cmd,
						 desc->bits[i].mask,
						 desc->bits[i].expected,
						 dword, ring->id);
				return false;
			}
		}
	}

	return true;
}
#define LENGTH_BIAS 2
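/*
 * Worked example (hypothetical header value): for a variable-length command
 * with header 0x18800005 and length mask 0x3F, the length field is 0x05, so
 * the parser advances (0x05 + LENGTH_BIAS) = 7 dwords in total, i.e. the
 * header plus six operand dwords.
 */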
/**
 * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
 * @ring: the ring on which the batch is to execute
 * @batch_obj: the batch buffer in question
 * @batch_start_offset: byte offset in the batch at which execution starts
 * @is_master: is the submitting process the drm master?
 *
 * Parses the specified batch buffer looking for privilege violations as
 * described in the overview.
 *
 * Return: non-zero if the parser finds violations or otherwise fails
 */
int i915_parse_cmds(struct intel_engine_cs *ring,
		    struct drm_i915_gem_object *batch_obj,
		    u32 batch_start_offset,
		    bool is_master)
{
	int ret = 0;
	u32 *cmd, *batch_base, *batch_end;
	struct drm_i915_cmd_descriptor default_desc = { 0 };
	int needs_clflush = 0;
	bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */

	ret = i915_gem_obj_prepare_shmem_read(batch_obj, &needs_clflush);
	if (ret) {
		DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
		return ret;
	}

	batch_base = vmap_batch(batch_obj);
	if (!batch_base) {
		DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
		i915_gem_object_unpin_pages(batch_obj);
		return -ENOMEM;
	}

	if (needs_clflush)
		drm_clflush_virt_range((char *)batch_base, batch_obj->base.size);

	cmd = batch_base + (batch_start_offset / sizeof(*cmd));
	batch_end = cmd + (batch_obj->base.size / sizeof(*batch_end));

	while (cmd < batch_end) {
		const struct drm_i915_cmd_descriptor *desc;
		u32 length;

		if (*cmd == MI_BATCH_BUFFER_END)
			break;

		desc = find_cmd(ring, *cmd, &default_desc);
		if (!desc) {
			DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
					 *cmd);
			ret = -EINVAL;
			break;
		}

		if (desc->flags & CMD_DESC_FIXED)
			length = desc->length.fixed;
		else
			length = ((*cmd & desc->length.mask) + LENGTH_BIAS);

		if ((batch_end - cmd) < length) {
			DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n",
					 *cmd,
					 length,
					 batch_end - cmd);
			ret = -EINVAL;
			break;
		}

		if (!check_cmd(ring, desc, cmd, is_master, &oacontrol_set)) {
			ret = -EINVAL;
			break;
		}

		cmd += length;
	}

	if (oacontrol_set) {
		DRM_DEBUG_DRIVER("CMD: batch set OACONTROL but did not clear it\n");
		ret = -EINVAL;
	}

	if (cmd >= batch_end) {
		DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
		ret = -EINVAL;
	}

	vunmap(batch_base);

	i915_gem_object_unpin_pages(batch_obj);

	return ret;
}
/**
 * i915_cmd_parser_get_version() - get the cmd parser version number
 *
 * The cmd parser maintains a simple increasing integer version number suitable
 * for passing to userspace clients to determine what operations are permitted.
 *
 * Return: the current version number of the cmd parser
 */
int i915_cmd_parser_get_version(void)
{
	/*
	 * Command parser version history
	 *
	 * 1. Initial version. Checks batches and reports violations, but leaves
	 *    hardware parsing enabled (so does not allow new use cases).
	 */
	return 1;
}