author:    Alex Deucher <alexander.deucher@amd.com>  2021-04-20 15:26:46 -0400
committer: Alex Deucher <alexander.deucher@amd.com>  2021-05-10 18:06:45 -0400
commit:    e8d7aa68c8fe68525d04a940ca59c1b5aae16b7e
tree:      f4cdf91e9ddd8b0962530060621c3c63cb4e6189
parent:    MAINTAINERS: fix a few more amdgpu tree links
drm/amdgpu: Add graphics cache rinse packet for sdma 5.0
Add an emit_mem_sync callback for sdma_v5_0.

In the amdgpu sync object test, three threads created jobs that sent a GFX
IB followed by an SDMA IB. After the first GFX thread was joined, the third
thread would sometimes reuse the same physical page to store its SDMA IB,
so there was a risk that the SDMA engine would read the stale GFX IB still
cached from that physical page. It is therefore safer to flush the caches
before committing the SDMA IB.

Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
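For context on how the new callback is consumed: emit_mem_sync is wired into
the ring-funcs table, and the common IB scheduling path invokes the hook (when
an engine provides one) before emitting the IBs themselves. The sketch below is
a standalone userspace model of that dispatch, not the driver code; the struct
and function names are simplified stand-ins for amdgpu_ring_funcs and
amdgpu_ib_schedule(), and the exact guard conditions in the real scheduler may
differ.

    #include <stdio.h>

    struct ring;                            /* toy stand-in for amdgpu_ring */

    struct ring_funcs {
            void (*emit_ib)(struct ring *ring);
            void (*emit_mem_sync)(struct ring *ring);   /* optional; may be NULL */
    };

    struct ring {
            const struct ring_funcs *funcs;
            const char *name;
    };

    static void sdma_emit_ib(struct ring *ring)
    {
            printf("%s: emit IB packet\n", ring->name);
    }

    static void sdma_emit_mem_sync(struct ring *ring)
    {
            printf("%s: emit GCR (graphics cache rinse) packet\n", ring->name);
    }

    static void schedule_ib(struct ring *ring)
    {
            /* Rinse the caches first, if the engine provides the hook... */
            if (ring->funcs->emit_mem_sync)
                    ring->funcs->emit_mem_sync(ring);
            /* ...then emit the IB itself, which now reads fresh data. */
            ring->funcs->emit_ib(ring);
    }

    int main(void)
    {
            static const struct ring_funcs sdma_funcs = {
                    .emit_ib = sdma_emit_ib,
                    .emit_mem_sync = sdma_emit_mem_sync, /* callback added by this patch */
            };
            struct ring sdma = { .funcs = &sdma_funcs, .name = "sdma0" };

            schedule_ib(&sdma);             /* cache rinse lands before the IB */
            return 0;
    }

Routing the rinse through an optional funcs-table callback, rather than
open-coding it in each emit_ib implementation, lets only the engines that need
the flush opt in, which is why the patch below touches just the sdma_v5_0
ring-funcs table.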
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 28
1 file changed, 28 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 920fc6d4a127..d294ef6a625a 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -438,6 +438,33 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
}
/**
+ * sdma_v5_0_ring_emit_mem_sync - flush the IB by graphics cache rinse
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Emit a GCR (graphics cache rinse) packet to flush and invalidate the
+ * GPU caches, so that an IB scheduled after this packet does not read
+ * stale data left behind by a previous job.
+ */
+static void sdma_v5_0_ring_emit_mem_sync(struct amdgpu_ring *ring)
+{
+ uint32_t gcr_cntl =
+ SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV |
+ SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
+ SDMA_GCR_GLI_INV(1);
+
+ /* flush the entire L0/L1/L2 caches; this could be narrowed later if profiling warrants */
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
+ amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD1_BASE_VA_31_7(0));
+ amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD2_GCR_CONTROL_15_0(gcr_cntl) |
+ SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0));
+ amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD3_LIMIT_VA_31_7(0) |
+ SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16));
+ amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD4_LIMIT_VA_47_32(0) |
+ SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0));
+}
+
+/**
* sdma_v5_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
*
* @ring: amdgpu ring pointer
@@ -1643,6 +1670,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 5 + 7 + 6, /* sdma_v5_0_ring_emit_ib */
.emit_ib = sdma_v5_0_ring_emit_ib,
+ .emit_mem_sync = sdma_v5_0_ring_emit_mem_sync,
.emit_fence = sdma_v5_0_ring_emit_fence,
.emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync,
.emit_vm_flush = sdma_v5_0_ring_emit_vm_flush,
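A note on the payload encoding in the new function: the GCR control word
assembled in gcr_cntl is wider than 16 bits, so the patch splits it across two
payload dwords, with bits 15:0 going into PAYLOAD2 via
SDMA_PKT_GCR_REQ_PAYLOAD2_GCR_CONTROL_15_0() and bits 18:16 going into PAYLOAD3
via SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16). The standalone
check below illustrates that split and reassembly; the mask helpers are
assumptions inferred from the macro names (the real macros in the SDMA packet
headers also shift each field into its position within the dword).

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical masks inferred from the macro names; the real
     * SDMA_PKT_GCR_REQ_* macros also place the fields within each
     * payload dword. */
    static uint32_t gcr_control_15_0(uint32_t v)  { return v & 0xffffu; } /* bits 15:0 */
    static uint32_t gcr_control_18_16(uint32_t v) { return v & 0x7u; }    /* bits 18:16, pre-shifted */

    int main(void)
    {
            uint32_t gcr_cntl = 0x5a5a5;    /* arbitrary 19-bit control word */

            uint32_t lo = gcr_control_15_0(gcr_cntl);          /* goes into PAYLOAD2 */
            uint32_t hi = gcr_control_18_16(gcr_cntl >> 16);   /* goes into PAYLOAD3 */

            /* Recombining both fields must reproduce the original word. */
            assert(((hi << 16) | lo) == gcr_cntl);
            printf("gcr_cntl=0x%05x -> lo=0x%04x hi=0x%x\n",
                   (unsigned)gcr_cntl, (unsigned)lo, (unsigned)hi);
            return 0;
    }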