author		Felix Kuehling <Felix.Kuehling@amd.com>	2021-04-07 18:19:58 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2021-04-20 21:45:45 -0400
commit		b40a6ab2cf9213923bf8e821ce7fa7f6a0a26990 (patch)
tree		d8462a33829f567a2b8eb777cc1f962459114a90 /drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
parent		drm/amdgpu/gmc9: remove dummy read workaround for newer chips (diff)
drm/amdkfd: Use drm_priv to pass VM from KFD to amdgpu
amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu needs the drm_priv to allow mmap
to access the BO through the corresponding file descriptor. The VM can
also be extracted from drm_priv, so drm_priv can replace the vm parameter
in the kfd2kgd interface.

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Philip Yang <philip.yang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
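For context, a minimal caller-side sketch of the reworked interface. This is an illustration only: kfd_example_map() is a hypothetical wrapper, while amdgpu_amdkfd_gpuvm_map_memory_to_gpu() and the drm_priv argument come from this patch.

	#include <drm/drm_file.h>
	#include "amdgpu_amdkfd.h"	/* kfd2kgd interface */

	/* With this patch the KFD side hands amdgpu the DRM file private
	 * data (drm_priv) instead of a raw struct amdgpu_vm pointer;
	 * amdgpu recovers the VM internally via drm_priv_to_vm().
	 */
	static int kfd_example_map(struct kgd_dev *kgd, struct kgd_mem *mem,
				   struct drm_file *drm_priv)
	{
		return amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kgd, mem, drm_priv);
	}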
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c	69
1 file changed, 41 insertions(+), 28 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 7d4118c8128a..dc86faa03b88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -948,6 +948,13 @@ static int process_update_pds(struct amdkfd_process_info *process_info,
return 0;
}
+static struct amdgpu_vm *drm_priv_to_vm(struct drm_file *drm_priv)
+{
+ struct amdgpu_fpriv *fpriv = drm_priv->driver_priv;
+
+ return &fpriv->vm;
+}
+
static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
struct dma_fence **ef)
{
@@ -1036,15 +1043,19 @@ create_evict_fence_fail:
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
struct file *filp, u32 pasid,
- void **vm, void **process_info,
+ void **process_info,
struct dma_fence **ef)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- struct drm_file *drm_priv = filp->private_data;
- struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
- struct amdgpu_vm *avm = &drv_priv->vm;
+ struct amdgpu_fpriv *drv_priv;
+ struct amdgpu_vm *avm;
int ret;
+ ret = amdgpu_file_to_fpriv(filp, &drv_priv);
+ if (ret)
+ return ret;
+ avm = &drv_priv->vm;
+
/* Already a compute VM? */
if (avm->process_info)
return -EINVAL;
@@ -1059,7 +1070,7 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
if (ret)
return ret;
- *vm = (void *)avm;
+ amdgpu_vm_set_task_info(avm);
return 0;
}
@@ -1100,15 +1111,17 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
}
}
-void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
+void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+ struct amdgpu_vm *avm;
- if (WARN_ON(!kgd || !vm))
+ if (WARN_ON(!kgd || !drm_priv))
return;
- pr_debug("Releasing process vm %p\n", vm);
+ avm = drm_priv_to_vm(drm_priv);
+
+ pr_debug("Releasing process vm %p\n", avm);
/* The original pasid of amdgpu vm has already been
* released during making a amdgpu vm to a compute vm
@@ -1119,9 +1132,9 @@ void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
amdgpu_vm_release_compute(adev, avm);
}
-uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
+uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
{
- struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+ struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
struct amdgpu_bo *pd = avm->root.base.bo;
struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
@@ -1132,11 +1145,11 @@ uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
struct kgd_dev *kgd, uint64_t va, uint64_t size,
- void *vm, struct kgd_mem **mem,
+ void *drm_priv, struct kgd_mem **mem,
uint64_t *offset, uint32_t flags)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+ struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
enum ttm_bo_type bo_type = ttm_bo_type_device;
struct sg_table *sg = NULL;
uint64_t user_addr = 0;
@@ -1347,10 +1360,10 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
}
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
- struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
+ struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+ struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
int ret;
struct amdgpu_bo *bo;
uint32_t domain;
@@ -1391,9 +1404,9 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
mem->va,
mem->va + bo_size * (1 + mem->aql_queue),
- vm, domain_string(domain));
+ avm, domain_string(domain));
- ret = reserve_bo_and_vm(mem, vm, &ctx);
+ ret = reserve_bo_and_vm(mem, avm, &ctx);
if (unlikely(ret))
goto out;
@@ -1437,7 +1450,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
}
list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
- if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
+ if (entry->bo_va->base.vm == avm && !entry->is_mapped) {
pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
entry->va, entry->va + bo_size,
entry);
@@ -1449,7 +1462,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
goto map_bo_to_gpuvm_failed;
}
- ret = vm_update_pds(vm, ctx.sync);
+ ret = vm_update_pds(avm, ctx.sync);
if (ret) {
pr_err("Failed to update page directories\n");
goto map_bo_to_gpuvm_failed;
@@ -1485,11 +1498,11 @@ out:
}
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
- struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
+ struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- struct amdkfd_process_info *process_info =
- ((struct amdgpu_vm *)vm)->process_info;
+ struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
+ struct amdkfd_process_info *process_info = avm->process_info;
unsigned long bo_size = mem->bo->tbo.base.size;
struct kfd_bo_va_list *entry;
struct bo_vm_reservation_context ctx;
@@ -1497,7 +1510,7 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
mutex_lock(&mem->lock);
- ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
+ ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
if (unlikely(ret))
goto out;
/* If no VMs were reserved, it means the BO wasn't actually mapped */
@@ -1506,17 +1519,17 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
goto unreserve_out;
}
- ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
+ ret = vm_validate_pt_pd_bos(avm);
if (unlikely(ret))
goto unreserve_out;
pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
mem->va,
mem->va + bo_size * (1 + mem->aql_queue),
- vm);
+ avm);
list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
- if (entry->bo_va->base.vm == vm && entry->is_mapped) {
+ if (entry->bo_va->base.vm == avm && entry->is_mapped) {
pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
entry->va,
entry->va + bo_size,
@@ -1642,14 +1655,14 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
struct dma_buf *dma_buf,
- uint64_t va, void *vm,
+ uint64_t va, void *drm_priv,
struct kgd_mem **mem, uint64_t *size,
uint64_t *mmap_offset)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
struct drm_gem_object *obj;
struct amdgpu_bo *bo;
- struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
if (dma_buf->ops != &amdgpu_dmabuf_ops)
/* Can't handle non-graphics buffers */