mutex_unlock(&adev->srbm_mutex);
/* Initialize all compute VMIDs to have no GDS, GWS, or OA
- acccess. These should be enabled by FW for target VMIDs. */
+ access. These should be enabled by FW for target VMIDs. */
for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
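For context: GDS (Global Data Share), GWS (Global Wave Sync), and OA (Ordered Append) are shared hardware resources, so each KFD-managed VMID starts out with no access until firmware grants it. A minimal sketch of the whole loop, assuming the amdgpu_gds_reg_offset table also carries gws and oa register offsets:

    for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
        WREG32(amdgpu_gds_reg_offset[i].mem_base, 0); /* no GDS window base */
        WREG32(amdgpu_gds_reg_offset[i].mem_size, 0); /* zero-sized: no access */
        WREG32(amdgpu_gds_reg_offset[i].gws, 0);      /* no GWS slots */
        WREG32(amdgpu_gds_reg_offset[i].oa, 0);       /* no OA slots */
    }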
* @adev: amdgpu_device pointer
*
* Set up the number and offset of the CP scratch registers.
- * NOTE: use of CP scratch registers is a legacy inferface and
+ * NOTE: use of CP scratch registers is a legacy interface and
* is not used by default on newer asics (r6xx+). On newer asics,
* memory buffers are used for fences rather than scratch regs.
*/
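A hedged sketch of the memory-buffer scheme this note contrasts with: the CP writes the completed sequence number into a driver-allocated writeback slot, and the CPU polls that memory instead of reading a scratch register (the pointer name below is illustrative, not the driver's exact field):

    /* illustrative: the CP has written the last-retired seq to *fence_cpu_addr */
    static bool fence_seq_signaled(volatile u32 *fence_cpu_addr, u32 seq)
    {
        /* signed difference keeps the comparison wraparound-safe */
        return (s32)(*fence_cpu_addr - seq) >= 0;
    }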
* @seq: sequence number
* @flags: fence related flags
*
- * Emits a fence sequnce number on the gfx ring and flushes
+ * Emits a fence sequence number on the gfx ring and flushes
* GPU caches.
*/
static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
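The body of this helper (outside the hunk) emits an EVENT_WRITE_EOP packet that flushes the caches and then writes the fence value to memory. A hedged sketch of the general shape, assuming CIK PM4 encoding; the real cache-action and interrupt-select bits depend on the fence flags:

    amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
    amdgpu_ring_write(ring, EOP_TCL1_ACTION_EN |   /* flush TC L1 */
                            EOP_TC_ACTION_EN |     /* flush TC L2 */
                            EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
                            EVENT_INDEX(5));
    amdgpu_ring_write(ring, addr & 0xfffffffc);    /* fence address lo */
    amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
                            DATA_SEL(1) | INT_SEL(2)); /* 32-bit data + irq */
    amdgpu_ring_write(ring, lower_32_bits(seq));
    amdgpu_ring_write(ring, upper_32_bits(seq));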
* @seq: sequence number
* @flags: fence related flags
*
- * Emits a fence sequnce number on the compute ring and flushes
+ * Emits a fence sequence number on the compute ring and flushes
* GPU caches.
*/
static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
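The compute variant performs the same flush-then-write, but on CIK the MEC rings are expected to use a RELEASE_MEM packet rather than EVENT_WRITE_EOP; a sketch under that assumption:

    amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
    amdgpu_ring_write(ring, EOP_TCL1_ACTION_EN | EOP_TC_ACTION_EN |
                            EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
                            EVENT_INDEX(5));
    amdgpu_ring_write(ring, DATA_SEL(1) | INT_SEL(2)); /* 32-bit data + irq */
    amdgpu_ring_write(ring, addr & 0xfffffffc);
    amdgpu_ring_write(ring, upper_32_bits(addr));
    amdgpu_ring_write(ring, lower_32_bits(seq));
    amdgpu_ring_write(ring, upper_32_bits(seq));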
- * gfx_v7_0_ring_emit_ib - emit an IB (Indirect Buffer) on the ring
+ * gfx_v7_0_ring_emit_ib_gfx - emit an IB (Indirect Buffer) on the ring
*
* @ring: amdgpu_ring structure holding ring information
- * @job: job to retrive vmid from
+ * @job: job to retrieve vmid from
* @ib: amdgpu indirect buffer object
* @flags: options (AMDGPU_HAVE_CTX_SWITCH)
*
- * Emits an DE (drawing engine) or CE (constant engine) IB
+ * Emits a DE (drawing engine) or CE (constant engine) IB
* on the gfx ring. IBs are usually generated by userspace
* acceleration drivers and submitted to the kernel for
- * sheduling on the ring. This function schedules the IB
+ * scheduling on the ring. This function schedules the IB
* on the gfx ring for execution by the GPU.
*/
static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
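"Schedules the IB on the ring" concretely means writing an INDIRECT_BUFFER packet whose payload is the IB's GPU address and size; the CP then fetches and executes the userspace-built commands from memory. A hedged sketch of the DE case, assuming CIK encoding (the real control word carries a few more bits):

    u32 control = ib->length_dw | (vmid << 24); /* size in dwords + vmid */

    amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
    amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xfffffffc);
    amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffff);
    amdgpu_ring_write(ring, control);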
/*
* CP.
- * On CIK, gfx and compute now have independant command processors.
+ * On CIK, gfx and compute now have independent command processors.
*
* GFX
* Gfx consists of a single ring and can process both gfx jobs and
ring->wptr = 0;
WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
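Programming the writeback ("wb") address unconditionally keeps the register state consistent either way; when enabled, the CP mirrors CP_RB0_RPTR into that memory slot so the driver can poll plain memory instead of doing an MMIO read per query, which is essentially what the ring's get_rptr helper does:

    static u32 gfx_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
    {
        /* the CP copies its read pointer into the wb slot set up above */
        return ring->adev->wb.wb[ring->rptr_offs];
    }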
mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
mqd->cp_hqd_pq_rptr_report_addr_hi =
/**
* gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP
*
- * @ring: the ring to emmit the commands to
+ * @ring: the ring to emit the commands to
*
- * Sync the command pipeline with the PFP. E.g. wait for everything
+ * Sync the command pipeline with the PFP. I.e. wait for everything
* to be completed.
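The wait itself is a WAIT_REG_MEM packet that polls the fence location in memory until it matches the emitted sequence number; the "poll interval" write in the hunk below is that packet's final dword. A hedged sketch, assuming CIK encoding:

    amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
    amdgpu_ring_write(ring, WAIT_REG_MEM_MEM_SPACE(1) |   /* poll memory, not a reg */
                            WAIT_REG_MEM_FUNCTION(3) |    /* equal-to comparison */
                            WAIT_REG_MEM_ENGINE(usepfp)); /* PFP or ME does the wait */
    amdgpu_ring_write(ring, addr & 0xfffffffc);           /* fence address lo */
    amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
    amdgpu_ring_write(ring, seq);                         /* reference value */
    amdgpu_ring_write(ring, 0xffffffff);                  /* compare mask */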
amdgpu_ring_write(ring, 4); /* poll interval */
if (usepfp) {
- /* synce CE with ME to prevent CE fetch CEIB before context switch done */
+ /* sync CE with ME to prevent CE fetch CEIB before context switch done */
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
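The SWITCH_BUFFER packet is emitted twice because the CE runs ahead of the ME on double-buffered (ping-pong) state, and a single switch only retires one half; draining both keeps the CE from fetching the next CE IB until the ME has finished the context switch. The same two-packet sequence appears on later ASICs for this reason, so that reading is assumed to hold here as well.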