RAT0 is for global binding write
VTX1 is for global binding read
-for wrting images RAT1...
+for writing images RAT1...
for reading images TEX2...
TEX2-RAT1 is paired
r600_store_value(cb, 0);
/* R_008C28_SQ_STACK_RESOURCE_MGMT_3
- * Set the Contol Flow stack entries to 0 for the HS stage, and
+ * Set the Control Flow stack entries to 0 for the HS stage, and
* set it to the maximum value for the CS (aka LS) stage. */
r600_store_value(cb,
S_008C28_NUM_LS_STACK_ENTRIES(num_stack_entries));
slice_tile_max = (rsrc->surface.u.legacy.level[src_level].nblk_x * rsrc->surface.u.legacy.level[src_level].nblk_y) / (8*8);
slice_tile_max = slice_tile_max ? slice_tile_max - 1 : 0;
/* linear height must be the same as the slice tile max height, it's ok even
- * if the linear destination/source have smaller heigh as the size of the
+ * if the linear destination/source have smaller height as the size of the
* dma packet will be using the copy_height which is always smaller or equal
* to the linear height
*/
slice_tile_max = (rdst->surface.u.legacy.level[dst_level].nblk_x * rdst->surface.u.legacy.level[dst_level].nblk_y) / (8*8);
slice_tile_max = slice_tile_max ? slice_tile_max - 1 : 0;
/* linear height must be the same as the slice tile max height, it's ok even
- * if the linear destination/source have smaller heigh as the size of the
+ * if the linear destination/source have smaller height as the size of the
* dma packet will be using the copy_height which is always smaller or equal
* to the linear height
*/
}
}
-/* alu instructions that can ony exits once per group */
+/* alu instructions that can only exist once per group */
static int is_alu_once_inst(struct r600_bytecode_alu *alu)
{
return r600_isa_alu(alu->op)->flags & (AF_KILL | AF_PRED) || alu->is_lds_idx_op || alu->op == ALU_OP0_GROUP_BARRIER;
return 0;
/* Just check every possible combination of bank swizzle.
- * Not very efficent, but works on the first try in most of the cases. */
+ * Not very efficient, but works on the first try in most of the cases. */
for (i = 0; i < 4; i++)
if (!slots[i] || !slots[i]->bank_swizzle_force || slots[i]->is_lds_idx_op)
bank_swizzle[i] = SQ_ALU_VEC_012;
if (!prev[j] || !alu_writes(prev[j]))
continue;
- /* If it's relative then we can't determin which gpr is really used. */
+ /* If it's relative then we can't determine which gpr is really used. */
if (prev[j]->dst.chan == alu->src[src].chan &&
(prev[j]->dst.sel == alu->src[src].sel ||
prev[j]->dst.rel || alu->src[src].rel))
/* looks like everything worked out right, apply the changes */
- /* undo adding previus literals */
+ /* undo adding previous literals */
bc->cf_last->ndw -= align(prev_nliteral, 2);
/* sort instructions */
unsigned r6xx_nop_after_rel_dst;
bool index_loaded[2];
unsigned index_reg[2]; /* indexing register CF_INDEX_[01] */
- unsigned index_reg_chan[2]; /* indexing register chanel CF_INDEX_[01] */
+ unsigned index_reg_chan[2]; /* indexing register channel CF_INDEX_[01] */
unsigned debug_id;
struct r600_isa* isa;
struct r600_bytecode_output pending_outputs[5];
}
}
- /* reenable compression in DB_RENDER_CONTROL */
+ /* re-enable compression in DB_RENDER_CONTROL */
rctx->db_misc_state.flush_depthstencil_through_cb = false;
r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
}
struct r600_common_context *rctx = (struct r600_common_context*)ctx;
struct r600_resource *rbuffer = r600_resource(resource);
- /* We currently only do anyting here for buffers */
+ /* We currently only do anything here for buffers */
if (resource->target == PIPE_BUFFER)
(void)r600_invalidate_buffer(rctx, rbuffer);
}
if (rctx->b.flags & R600_CONTEXT_INV_CONST_CACHE) {
/* Direct constant addressing uses the shader cache.
- * Indirect contant addressing uses the vertex cache. */
+ * Indirect constant addressing uses the vertex cache. */
cp_coher_cntl |= S_0085F0_SH_ACTION_ENA(1) |
(rctx->has_vertex_cache ? S_0085F0_VC_ACTION_ENA(1)
: S_0085F0_TC_ACTION_ENA(1));
case PIPE_CAP_TWO_SIDED_COLOR:
return !is_nir_enabled(&rscreen->b);
case PIPE_CAP_INT64_DIVMOD:
- /* it is actually not supported, but the nir lowering handles this corectly wheras
+ /* it is actually not supported, but the nir lowering handles this correctly whereas
 * the glsl lowering path seems not to initialize the builtins correctly.
*/
return is_nir_enabled(&rscreen->b);
* streamout, DMA, or as a random access target). The rest of
* the buffer is considered invalid and can be mapped unsynchronized.
*
- * This allows unsychronized mapping of a buffer range which hasn't
+ * This allows unsynchronized mapping of a buffer range which hasn't
* been used yet. It's for applications which forget to use
* the unsynchronized map flag and expect the driver to figure it out.
*/
sel->nir_blob = NULL;
}
sel->nir = tgsi_to_nir(sel->tokens, ctx->screen, true);
- /* Lower int64 ops because we have some r600 build-in shaders that use it */
+ /* Lower int64 ops because we have some r600 built-in shaders that use it */
if (nir_options->lower_int64_options) {
NIR_PASS_V(sel->nir, nir_lower_regs_to_ssa);
NIR_PASS_V(sel->nir, nir_lower_alu_to_scalar, r600_lower_to_scalar_instr_filter, NULL);
* calculated from the MBCNT instructions.
* Then the shader engine ID is multiplied by 256,
* and the wave id is added.
- * Then the result is multipled by 64 and thread id is
+ * Then the result is multiplied by 64 and thread id is
* added.
*/
static int load_thread_id_gpr(struct r600_shader_ctx *ctx)
if (inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY ||
inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY)
/* make sure array index selector is 0, this is just a safety
- * precausion because TGSI seems to emit something strange here */
+ * precaution because TGSI seems to emit something strange here */
t->src_sel_z = 4;
else
t->src_sel_z = inst->TexOffsets[0].SwizzleZ;
slice_tile_max = (rsrc->surface.u.legacy.level[src_level].nblk_x * rsrc->surface.u.legacy.level[src_level].nblk_y) / (8*8);
slice_tile_max = slice_tile_max ? slice_tile_max - 1 : 0;
/* linear height must be the same as the slice tile max height, it's ok even
- * if the linear destination/source have smaller heigh as the size of the
+ * if the linear destination/source have smaller height as the size of the
* dma packet will be using the copy_height which is always smaller or equal
* to the linear height
*/
slice_tile_max = (rdst->surface.u.legacy.level[dst_level].nblk_x * rdst->surface.u.legacy.level[dst_level].nblk_y) / (8*8);
slice_tile_max = slice_tile_max ? slice_tile_max - 1 : 0;
/* linear height must be the same as the slice tile max height, it's ok even
- * if the linear destination/source have smaller heigh as the size of the
+ * if the linear destination/source have smaller height as the size of the
* dma packet will be using the copy_height which is always smaller or equal
* to the linear height
*/
unsigned id = 1;
unsigned i;
/* !!!
- * To avoid GPU lockup registers must be emited in a specific order
+ * To avoid GPU lockup registers must be emitted in a specific order
 * (no kidding ...). The order below is important and has been
- * partialy infered from analyzing fglrx command stream.
+ * partially inferred from analyzing fglrx command stream.
*
* Don't reorder atom without carefully checking the effect (GPU lockup
* or piglit regression).
r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_GEOMETRY].atom, id++, r600_emit_gs_constant_buffers, 0);
r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_FRAGMENT].atom, id++, r600_emit_ps_constant_buffers, 0);
- /* sampler must be emited before TA_CNTL_AUX otherwise DISABLE_CUBE_WRAP change
- * does not take effect (TA_CNTL_AUX emited by r600_emit_seamless_cube_map)
+ /* sampler must be emitted before TA_CNTL_AUX otherwise DISABLE_CUBE_WRAP change
+ * does not take effect (TA_CNTL_AUX emitted by r600_emit_seamless_cube_map)
*/
r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].states.atom, id++, r600_emit_vs_sampler_states, 0);
r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].states.atom, id++, r600_emit_gs_sampler_states, 0);
* it will therefore overwrite the VS slots. If it now gets disabled,
* the VS needs to rebind all buffer/resource/sampler slots - not only
* has TES overwritten the corresponding slots, but when the VS was
- * operating as LS the things with correpsonding dirty bits got bound
+ * operating as LS the things with corresponding dirty bits got bound
* to LS slots and won't reflect what is dirty as VS stage even if the
* TES didn't overwrite it. The story for re-enabled TES is similar.
* In any case, we're not allowed to submit any TES state when
* not divisible by 8.
* Mesa conversion functions don't swap bits for those formats, and because
* we transmit this over a serial bus to the GPU (PCIe), the
- * bit-endianess is important!!!
+ * bit-endianness is important!!!
* In case we have an "opposite" format, just use that for the swizzling
* information. If we don't have such an "opposite" format, we need
* to use a fixed swizzle info instead (see below)
/*
* No need to do endian swaps on array formats,
 * as mesa<-->pipe formats conversion takes into account
- * the endianess
+ * the endianness
*/
return ENDIAN_NONE;
/*
* No need to do endian swaps on array formats,
 * as mesa<-->pipe formats conversion takes into account
- * the endianess
+ * the endianness
*/
return ENDIAN_NONE;
* First downsample the depth buffer to a temporary texture,
* then decompress the temporary one to staging.
*
- * Only the region being mapped is transfered.
+ * Only the region being mapped is transferred.
*/
struct pipe_resource resource;
* surface pitch isn't correctly aligned by default.
*
* In order to support it correctly we require multi-image
- * metadata to be syncrhonized between radv and radeonsi. The
+ * metadata to be synchronized between radv and radeonsi. The
* semantics of associating multiple image metadata to a memory
* object on the vulkan export side are not concretely defined
* either.
dpb_size += align(width_in_mb * height_in_mb * 32, alignment);
}
} else {
- // the firmware seems to allways assume a minimum of ref frames
+ // the firmware seems to always assume a minimum of ref frames
max_references = MAX2(NUM_H264_REFS, max_references);
// reference picture buffer
dpb_size = image_size * max_references;
}
case PIPE_VIDEO_FORMAT_VC1:
- // the firmware seems to allways assume a minimum of ref frames
+ // the firmware seems to always assume a minimum of ref frames
max_references = MAX2(NUM_VC1_REFS, max_references);
// reference picture buffer