author	Joonas Lahtinen <joonas.lahtinen@linux.intel.com>	2021-01-15 08:49:57 +0200
committer	Joonas Lahtinen <joonas.lahtinen@linux.intel.com>	2021-01-15 08:49:57 +0200
commit	d263dfa7d2697a43f3299b9731cd568ee49cdd2c (patch)
tree	f5d993c7000ac41ee926acf0b357ec572ef7908f /drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
parent	drm/i915/gt: Prune inlines (diff)
parent	Merge tag 'drm-intel-gt-next-2021-01-14' of git://anongit.freedesktop.org/drm/drm-intel into drm-next (diff)
Merge drm/drm-next into drm-intel-gt-next
Backmerging to get a common base for merging topic branches between drm-intel-next and drm-intel-gt-next.

Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c	22
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 9c72b95b7463..d345e324837d 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -32,7 +32,6 @@
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
-#include "hdp/hdp_5_0_0_offset.h"
#include "ivsrcid/sdma0/irqsrcs_sdma0_5_0.h"
#include "ivsrcid/sdma1/irqsrcs_sdma1_5_0.h"
@@ -203,7 +202,7 @@ static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
const struct common_firmware_header *header = NULL;
const struct sdma_firmware_header_v1_0 *hdr;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_NAVI12))
return 0;
DRM_DEBUG("\n");
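For context, the narrowed check reads roughly as follows inside the function (a sketch; only the condition itself comes from this hunk, the surrounding lines are elided):

static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
{
	/* ... */
	/* Microcode loading is now skipped only for NAVI12 virtual
	 * functions; other SR-IOV ASICs fall through and load firmware. */
	if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_NAVI12))
		return 0;
	/* ... */
}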
@@ -392,7 +391,9 @@ static void sdma_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
* sdma_v5_0_ring_emit_ib - Schedule an IB on the DMA engine
*
* @ring: amdgpu ring pointer
+ * @job: job to retrieve vmid from
* @ib: IB object to schedule
+ * @flags: unused
*
* Schedule an IB in the DMA ring (NAVI10).
*/
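The added @job and @flags lines track the function's parameter list; the kernel-doc checker warns when a prototype parameter has no matching description. A sketch of the documented prototype, assumed from the parameter names (the hunk only shows the comment):

static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
				   struct amdgpu_job *job,
				   struct amdgpu_ib *ib,
				   uint32_t flags);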
@@ -469,7 +470,9 @@ static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
* sdma_v5_0_ring_emit_fence - emit a fence on the DMA ring
*
* @ring: amdgpu ring pointer
- * @fence: amdgpu fence object
+ * @addr: address
+ * @seq: sequence number
+ * @flags: fence related flags
*
* Add a DMA fence packet to the ring to write
* the fence seq number and DMA trap packet to generate
@@ -959,6 +962,7 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
* sdma_v5_0_ring_test_ib - test an IB on the DMA engine
*
* @ring: amdgpu_ring structure holding ring information
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
*
* Test a simple IB in the DMA ring (NAVI10).
* Returns 0 on success, error on failure.
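On the new @timeout line: the value is in jiffies, as with the other ring test_ib callbacks. An illustrative caller-side sketch (not taken from this diff):

	/* wait up to one second for the test IB to complete */
	long tmo = msecs_to_jiffies(1000);
	int r = ring->funcs->test_ib(ring, tmo);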
@@ -1061,10 +1065,9 @@ static void sdma_v5_0_vm_copy_pte(struct amdgpu_ib *ib,
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @value: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
- * @flags: access flags
*
* Update PTEs by writing them manually using sDMA (NAVI10).
*/
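The renamed @value and the dropped @flags mirror the write_pte callback's parameters; a sketch of the assumed prototype:

static void sdma_v5_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				   uint64_t value, unsigned count,
				   uint32_t incr);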
@@ -1118,6 +1121,7 @@ static void sdma_v5_0_vm_set_pte_pde(struct amdgpu_ib *ib,
/**
* sdma_v5_0_ring_pad_ib - pad the IB
+ * @ring: amdgpu_ring structure holding ring information
* @ib: indirect buffer to fill with padding
*
* Pad the IB with NOPs to a boundary multiple of 8.
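As a worked example of the padding rule (illustrative only, not the driver's actual loop): a 13-dword IB needs 3 NOP dwords to reach the next multiple of 8, i.e. 16 dwords; one way to compute that count is

	pad = (-ib->length_dw) & 7;	/* 3 when length_dw == 13 */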
@@ -1170,7 +1174,8 @@ static void sdma_v5_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
* sdma_v5_0_ring_emit_vm_flush - vm flush using sDMA
*
* @ring: amdgpu_ring pointer
- * @vm: amdgpu_vm pointer
+ * @vmid: vmid number to use
+ * @pd_addr: address
*
* Update the page table base and flush the VM TLB
* using sDMA (NAVI10).
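The @vmid and @pd_addr lines match the emit_vm_flush callback; a sketch of the assumed prototype:

static void sdma_v5_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vmid, uint64_t pd_addr);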
@@ -1686,10 +1691,11 @@ static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev)
/**
* sdma_v5_0_emit_copy_buffer - copy buffer using the sDMA engine
*
- * @ring: amdgpu_ring structure holding ring information
+ * @ib: indirect buffer to copy to
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
+ * @tmz: if a secure copy should be used
*
* Copy GPU buffers using the DMA engine (NAVI10).
* Used by the amdgpu ttm implementation to move pages if
@@ -1715,7 +1721,7 @@ static void sdma_v5_0_emit_copy_buffer(struct amdgpu_ib *ib,
/**
* sdma_v5_0_emit_fill_buffer - fill buffer using the sDMA engine
*
- * @ring: amdgpu_ring structure holding ring information
+ * @ib: indirect buffer to fill
* @src_data: value to write to buffer
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
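Both buffer helpers emit packets into an IB rather than directly onto a ring, which is what the @ring to @ib renames reflect. A sketch of the assumed prototypes, with parameter names taken from the updated comments:

static void sdma_v5_0_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count,
				       bool tmz);

static void sdma_v5_0_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count);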