drm/amd/amdgpu/gfx_v7_0: Trivial typo fixes
s/acccess/access/
s/inferface/interface/
s/sequnce/sequence/ (two places)
s/retrive/retrieve/
s/sheduling/scheduling/
s/independant/independent/
s/wether/whether/ (two places)
s/emmit/emit/
s/synce/sync/

Reviewed-by: Nirmoy Das <nirmoy.das@amd.com>
Signed-off-by: Bhaskar Chowdhury <unixbhaskar@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 63a93023ee
parent f19a2067a2
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1877,7 +1877,7 @@ static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
 	mutex_unlock(&adev->srbm_mutex);
 
 	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
-	   acccess. These should be enabled by FW for target VMIDs. */
+	   access. These should be enabled by FW for target VMIDs. */
 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
 		WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
 		WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
@@ -2058,7 +2058,7 @@ static void gfx_v7_0_constants_init(struct amdgpu_device *adev)
  * @adev: amdgpu_device pointer
  *
  * Set up the number and offset of the CP scratch registers.
- * NOTE: use of CP scratch registers is a legacy inferface and
+ * NOTE: use of CP scratch registers is a legacy interface and
  * is not used by default on newer asics (r6xx+). On newer asics,
  * memory buffers are used for fences rather than scratch regs.
  */
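As background for the note above: the legacy CP scratch registers are plain MMIO registers that the command processor can write on behalf of a submitted packet, and they are also roughly how the ring sanity test works on this generation. The sketch below illustrates that pattern under stated assumptions; the example_ function name is made up, and the amdgpu_gfx_scratch_get()/amdgpu_gfx_scratch_free() helpers and the SET_UCONFIG_REG packet layout are assumed from the CIK-era driver rather than taken from this diff.

/* Illustrative sketch of the legacy scratch-register path; helper and
 * packet names are assumptions based on the CIK-era driver, not part
 * of this patch. */
static int example_scratch_ring_test(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch, tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);	/* reserve one scratch reg */
	if (r)
		return r;
	WREG32(scratch, 0xCAFEDEAD);			/* seed with a known value */

	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		goto out;
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, scratch - PACKET3_SET_UCONFIG_REG_START);
	amdgpu_ring_write(ring, 0xDEADBEEF);		/* CP writes this back */
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);			/* poll until the CP ran the packet */
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}
	r = (tmp == 0xDEADBEEF) ? 0 : -ETIMEDOUT;
out:
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}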
@@ -2172,7 +2172,7 @@ static void gfx_v7_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
  * @seq: sequence number
  * @flags: fence related flags
  *
- * Emits a fence sequnce number on the gfx ring and flushes
+ * Emits a fence sequence number on the gfx ring and flushes
  * GPU caches.
  */
 static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
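For context on the comment just fixed: on CIK a gfx fence is typically emitted as an EVENT_WRITE_EOP packet that flushes GPU caches and then has the CP write the sequence number to the fence address once preceding work has drained. A minimal sketch of such a packet follows; the example_ name is hypothetical, and the flag macros (EOP_TC_ACTION_EN, EVENT_TYPE, DATA_SEL, INT_SEL and friends) are assumed from the CIK headers, not quoted from this patch.

/* Simplified sketch of a CIK gfx fence: flush caches, then have the CP
 * write "seq" to "addr" and raise an interrupt.  Flag macros are
 * assumptions based on cikd.h, not part of this diff. */
static void example_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, u64 seq)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	amdgpu_ring_write(ring, EOP_TCL1_ACTION_EN |
				EOP_TC_ACTION_EN |
				EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				EVENT_INDEX(5));			/* flush + timestamp event */
	amdgpu_ring_write(ring, lower_32_bits(addr) & 0xfffffffc);	/* fence address, low */
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
				DATA_SEL(1) |				/* write a 32-bit value */
				INT_SEL(2));				/* interrupt once written */
	amdgpu_ring_write(ring, lower_32_bits(seq));			/* the sequence number */
	amdgpu_ring_write(ring, 0);					/* unused upper half */
}

The compute-ring variant documented in the next hunk follows the same idea, differing mainly in which engine retires the event.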
@@ -2215,7 +2215,7 @@ static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
  * @seq: sequence number
  * @flags: fence related flags
  *
- * Emits a fence sequnce number on the compute ring and flushes
+ * Emits a fence sequence number on the compute ring and flushes
  * GPU caches.
  */
 static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
@@ -2245,14 +2245,14 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
  * gfx_v7_0_ring_emit_ib - emit an IB (Indirect Buffer) on the ring
  *
  * @ring: amdgpu_ring structure holding ring information
- * @job: job to retrive vmid from
+ * @job: job to retrieve vmid from
  * @ib: amdgpu indirect buffer object
  * @flags: options (AMDGPU_HAVE_CTX_SWITCH)
  *
  * Emits an DE (drawing engine) or CE (constant engine) IB
  * on the gfx ring. IBs are usually generated by userspace
  * acceleration drivers and submitted to the kernel for
- * sheduling on the ring. This function schedules the IB
+ * scheduling on the ring. This function schedules the IB
  * on the gfx ring for execution by the GPU.
  */
 static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
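To make the "schedules the IB on the ring" wording concrete: on CIK, submitting a DE IB is essentially one PACKET3_INDIRECT_BUFFER packet that hands the CP the IB's GPU address, its length in dwords, and the VMID to run it under. The sketch below illustrates that shape; the example_ function name is invented and the exact control-word packing is an approximation of the packet format, not this file's code.

/* Rough sketch of DE IB submission on CIK: one INDIRECT_BUFFER packet
 * pointing the CP at the userspace-built command buffer.  The bit layout
 * is an approximation, not this driver's exact code. */
static void example_emit_ib_gfx(struct amdgpu_ring *ring,
				struct amdgpu_ib *ib, unsigned vmid)
{
	u32 control = ib->length_dw | (vmid << 24);	/* size in dwords + VMID */

	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xfffffffc);	/* IB base, low */
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffff);		/* IB base, high */
	amdgpu_ring_write(ring, control);
}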
@@ -2402,7 +2402,7 @@ err1:
 
 /*
  * CP.
- * On CIK, gfx and compute now have independant command processors.
+ * On CIK, gfx and compute now have independent command processors.
  *
  * GFX
  * Gfx consists of a single ring and can process both gfx jobs and
@@ -2630,7 +2630,7 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
 	ring->wptr = 0;
 	WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
 
-	/* set the wb address wether it's enabled or not */
+	/* set the wb address whether it's enabled or not */
 	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
 	WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
 	WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
@@ -2985,7 +2985,7 @@ static void gfx_v7_0_mqd_init(struct amdgpu_device *adev,
 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
 
-	/* set the wb address wether it's enabled or not */
+	/* set the wb address whether it's enabled or not */
 	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
 	mqd->cp_hqd_pq_rptr_report_addr_hi =
@@ -3198,7 +3198,7 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
 /**
  * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP
  *
- * @ring: the ring to emmit the commands to
+ * @ring: the ring to emit the commands to
  *
  * Sync the command pipeline with the PFP. E.g. wait for everything
  * to be completed.
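This kdoc describes the pipeline sync whose tail is visible in the next hunk; the "4 /* poll interval */" line there is the final dword of a WAIT_REG_MEM packet that makes the PFP (or the ME, for compute) poll the ring's fence word in memory until the expected sequence number appears. A rough sketch follows; the example_ name is invented and the WAIT_REG_MEM_* field macros are assumed from the CIK headers rather than taken from this diff.

/* Rough sketch of the pipeline sync: poll the fence word in memory until
 * it equals "seq".  Field macros are assumptions based on cikd.h. */
static void example_emit_pipeline_sync(struct amdgpu_ring *ring,
				       u64 addr, u32 seq, bool usepfp)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, WAIT_REG_MEM_MEM_SPACE(1) |	/* poll memory, not a register */
				WAIT_REG_MEM_FUNCTION(3) |	/* wait until value == ref */
				WAIT_REG_MEM_ENGINE(usepfp));	/* PFP for gfx, ME for compute */
	amdgpu_ring_write(ring, lower_32_bits(addr) & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq);				/* reference value */
	amdgpu_ring_write(ring, 0xffffffff);			/* compare mask */
	amdgpu_ring_write(ring, 4);				/* poll interval */
}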
@@ -3220,7 +3220,7 @@ static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, 4); /* poll interval */
 
 	if (usepfp) {
-		/* synce CE with ME to prevent CE fetch CEIB before context switch done */
+		/* sync CE with ME to prevent CE fetch CEIB before context switch done */
 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
 		amdgpu_ring_write(ring, 0);
 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));