Diffstat (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_svm.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 414
1 file changed, 217 insertions(+), 197 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 16137c4247bb..7e92dcea4ce8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -193,7 +193,6 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
 
 	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
 		struct kfd_process_device *pdd;
-		struct amdgpu_device *adev;
 
 		pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
 		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
@@ -201,9 +200,8 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
 			pr_debug("failed to find device idx %d\n", gpuidx);
 			return -EINVAL;
 		}
-		adev = (struct amdgpu_device *)pdd->dev->kgd;
 
-		r = svm_range_dma_map_dev(adev, prange, offset, npages,
+		r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
 					  hmm_pfns, gpuidx);
 		if (r)
 			break;
@@ -581,7 +579,7 @@ svm_range_get_adev_by_id(struct svm_range *prange, uint32_t gpu_id)
 		return NULL;
 	}
 
-	return (struct amdgpu_device *)pdd->dev->kgd;
+	return pdd->dev->adev;
 }
 
 struct kfd_process_device *
@@ -593,7 +591,7 @@ svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev)
 	p = container_of(prange->svms, struct kfd_process, svms);
 
-	r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpu_idx);
+	r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpu_idx);
 	if (r) {
 		pr_debug("failed to get device id by adev %p\n", adev);
 		return NULL;
 	}
@@ -706,6 +704,61 @@ svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
 	}
 }
 
+static bool
+svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
+			uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
+{
+	uint32_t i;
+	int gpuidx;
+
+	for (i = 0; i < nattr; i++) {
+		switch (attrs[i].type) {
+		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
+			if (prange->preferred_loc != attrs[i].value)
+				return false;
+			break;
+		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
+			/* Prefetch should always trigger a migration even
+			 * if the value of the attribute didn't change.
+			 */
+			return false;
+		case KFD_IOCTL_SVM_ATTR_ACCESS:
+		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
+		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
+			gpuidx = kfd_process_gpuidx_from_gpuid(p,
+							       attrs[i].value);
+			if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
+				if (test_bit(gpuidx, prange->bitmap_access) ||
+				    test_bit(gpuidx, prange->bitmap_aip))
+					return false;
+			} else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
+				if (!test_bit(gpuidx, prange->bitmap_access))
+					return false;
+			} else {
+				if (!test_bit(gpuidx, prange->bitmap_aip))
+					return false;
+			}
+			break;
+		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
+			if ((prange->flags & attrs[i].value) != attrs[i].value)
+				return false;
+			break;
+		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
+			if ((prange->flags & attrs[i].value) != 0)
+				return false;
+			break;
+		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
+			if (prange->granularity != attrs[i].value)
+				return false;
+			break;
+		default:
+			WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
+		}
+	}
+
+	return true;
+}
+
 /**
  * svm_range_debug_dump - print all range information from svms
  * @svms: svm range list header
@@ -743,14 +796,6 @@ static void svm_range_debug_dump(struct svm_range_list *svms)
 	}
 }
 
-static bool
-svm_range_is_same_attrs(struct svm_range *old, struct svm_range *new)
-{
-	return (old->prefetch_loc == new->prefetch_loc &&
-		old->flags == new->flags &&
-		old->granularity == new->granularity);
-}
-
 static int
 svm_range_split_array(void *ppnew, void *ppold, size_t size,
 		      uint64_t old_start, uint64_t old_n,
@@ -943,7 +988,7 @@ svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
 }
 
 static int
-svm_range_split_tail(struct svm_range *prange, struct svm_range *new,
+svm_range_split_tail(struct svm_range *prange,
 		     uint64_t new_last, struct list_head *insert_list)
 {
 	struct svm_range *tail;
@@ -955,7 +1000,7 @@
 }
 
 static int
-svm_range_split_head(struct svm_range *prange, struct svm_range *new,
+svm_range_split_head(struct svm_range *prange,
 		     uint64_t new_start, struct list_head *insert_list)
 {
 	struct svm_range *head;
@@ -1053,8 +1098,8 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
 	if (domain == SVM_RANGE_VRAM_DOMAIN)
 		bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
 
-	switch (adev->asic_type) {
-	case CHIP_ARCTURUS:
+	switch (KFD_GC_VERSION(adev->kfd.dev)) {
+	case IP_VERSION(9, 4, 1):
 		if (domain == SVM_RANGE_VRAM_DOMAIN) {
 			if (bo_adev == adev) {
 				mapping_flags |= coherent ?
@@ -1070,7 +1115,7 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
 				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
 		}
 		break;
-	case CHIP_ALDEBARAN:
+	case IP_VERSION(9, 4, 2):
 		if (domain == SVM_RANGE_VRAM_DOMAIN) {
 			if (bo_adev == adev) {
 				mapping_flags |= coherent ?
@@ -1129,7 +1174,6 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
 	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
 	struct kfd_process_device *pdd;
 	struct dma_fence *fence = NULL;
-	struct amdgpu_device *adev;
 	struct kfd_process *p;
 	uint32_t gpuidx;
 	int r = 0;
@@ -1145,9 +1189,9 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
 			pr_debug("failed to find device idx %d\n", gpuidx);
 			return -EINVAL;
 		}
-		adev = (struct amdgpu_device *)pdd->dev->kgd;
 
-		r = svm_range_unmap_from_gpu(adev, drm_priv_to_vm(pdd->drm_priv),
+		r = svm_range_unmap_from_gpu(pdd->dev->adev,
+					     drm_priv_to_vm(pdd->drm_priv),
 					     start, last, &fence);
 		if (r)
 			break;
@@ -1159,7 +1203,7 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
 			if (r)
 				break;
 		}
-		amdgpu_amdkfd_flush_gpu_tlb_pasid((struct kgd_dev *)adev,
+		amdgpu_amdkfd_flush_gpu_tlb_pasid(pdd->dev->adev,
 					p->pasid, TLB_FLUSH_HEAVYWEIGHT);
 	}
 
@@ -1172,7 +1216,6 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		     unsigned long npages, bool readonly, dma_addr_t *dma_addr,
 		     struct amdgpu_device *bo_adev, struct dma_fence **fence)
 {
-	struct amdgpu_bo_va bo_va;
 	bool table_freed = false;
 	uint64_t pte_flags;
 	unsigned long last_start;
@@ -1185,9 +1228,6 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
 		 last_start, last_start + npages - 1, readonly);
 
-	if (prange->svm_bo && prange->ttm_res)
-		bo_va.is_xgmi = amdgpu_xgmi_same_hive(adev, bo_adev);
-
 	for (i = offset; i < offset + npages; i++) {
 		last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
 		dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
@@ -1243,8 +1283,7 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		struct kfd_process *p;
 
 		p = container_of(prange->svms, struct kfd_process, svms);
-		amdgpu_amdkfd_flush_gpu_tlb_pasid((struct kgd_dev *)adev,
-						p->pasid, TLB_FLUSH_LEGACY);
+		amdgpu_amdkfd_flush_gpu_tlb_pasid(adev, p->pasid, TLB_FLUSH_LEGACY);
 	}
 out:
 	return r;
@@ -1257,7 +1296,6 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
 {
 	struct kfd_process_device *pdd;
 	struct amdgpu_device *bo_adev;
-	struct amdgpu_device *adev;
 	struct kfd_process *p;
 	struct dma_fence *fence = NULL;
 	uint32_t gpuidx;
@@ -1276,19 +1314,18 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
 			pr_debug("failed to find device idx %d\n", gpuidx);
 			return -EINVAL;
 		}
-		adev = (struct amdgpu_device *)pdd->dev->kgd;
 
 		pdd = kfd_bind_process_to_device(pdd->dev, p);
 		if (IS_ERR(pdd))
 			return -EINVAL;
 
-		if (bo_adev && adev != bo_adev &&
-		    !amdgpu_xgmi_same_hive(adev, bo_adev)) {
+		if (bo_adev && pdd->dev->adev != bo_adev &&
+		    !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
 			pr_debug("cannot map to device idx %d\n", gpuidx);
 			continue;
 		}
 
-		r = svm_range_map_to_gpu(adev, drm_priv_to_vm(pdd->drm_priv),
+		r = svm_range_map_to_gpu(pdd->dev->adev, drm_priv_to_vm(pdd->drm_priv),
 					 prange, offset, npages, readonly,
 					 prange->dma_addr[gpuidx], bo_adev,
 					 wait ? &fence : NULL);
@@ -1322,7 +1359,6 @@ struct svm_validate_context {
 static int svm_range_reserve_bos(struct svm_validate_context *ctx)
 {
 	struct kfd_process_device *pdd;
-	struct amdgpu_device *adev;
 	struct amdgpu_vm *vm;
 	uint32_t gpuidx;
 	int r;
@@ -1334,7 +1370,6 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx)
 			pr_debug("failed to find device idx %d\n", gpuidx);
 			return -EINVAL;
 		}
-		adev = (struct amdgpu_device *)pdd->dev->kgd;
 		vm = drm_priv_to_vm(pdd->drm_priv);
 
 		ctx->tv[gpuidx].bo = &vm->root.bo->tbo;
@@ -1356,9 +1391,9 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx)
 			r = -EINVAL;
 			goto unreserve_out;
 		}
-		adev = (struct amdgpu_device *)pdd->dev->kgd;
 
-		r = amdgpu_vm_validate_pt_bos(adev, drm_priv_to_vm(pdd->drm_priv),
+		r = amdgpu_vm_validate_pt_bos(pdd->dev->adev,
+					      drm_priv_to_vm(pdd->drm_priv),
 					      svm_range_bo_validate, NULL);
 		if (r) {
 			pr_debug("failed %d validate pt bos\n", r);
@@ -1381,12 +1416,10 @@ static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
 static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
 {
 	struct kfd_process_device *pdd;
-	struct amdgpu_device *adev;
 
 	pdd = kfd_process_device_from_gpuidx(p, gpuidx);
-	adev = (struct amdgpu_device *)pdd->dev->kgd;
 
-	return SVM_ADEV_PGMAP_OWNER(adev);
+	return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
 }
 
 /*
@@ -1574,7 +1607,6 @@ retry_flush_work:
 static void svm_range_restore_work(struct work_struct *work)
 {
 	struct delayed_work *dwork = to_delayed_work(work);
-	struct amdkfd_process_info *process_info;
 	struct svm_range_list *svms;
 	struct svm_range *prange;
 	struct kfd_process *p;
@@ -1594,12 +1626,10 @@ static void svm_range_restore_work(struct work_struct *work)
 	 * the lifetime of this thread, kfd_process and mm will be valid.
 	 */
 	p = container_of(svms, struct kfd_process, svms);
-	process_info = p->kgd_process_info;
 	mm = p->mm;
 	if (!mm)
 		return;
 
-	mutex_lock(&process_info->lock);
 	svm_range_list_lock_and_flush_work(svms, mm);
 	mutex_lock(&svms->lock);
 
@@ -1652,7 +1682,6 @@ static void svm_range_restore_work(struct work_struct *work)
 out_reschedule:
 	mutex_unlock(&svms->lock);
 	mmap_write_unlock(mm);
-	mutex_unlock(&process_info->lock);
 
 	/* If validation failed, reschedule another attempt */
 	if (evicted_ranges) {
@@ -1664,6 +1693,10 @@ out_reschedule:
 
 /**
  * svm_range_evict - evict svm range
+ * @prange: svm range structure
+ * @mm: current process mm_struct
+ * @start: starting process queue number
+ * @last: last process queue number
  *
  * Stop all queues of the process to ensure GPU doesn't access the memory, then
  * return to let CPU evict the buffer and proceed CPU pagetable update.
@@ -1768,46 +1801,49 @@ static struct svm_range *svm_range_clone(struct svm_range *old)
 }
 
 /**
- * svm_range_handle_overlap - split overlap ranges
- * @svms: svm range list header
- * @new: range added with this attributes
- * @start: range added start address, in pages
- * @last: range last address, in pages
- * @update_list: output, the ranges attributes are updated. For set_attr, this
- *               will do validation and map to GPUs. For unmap, this will be
- *               removed and unmap from GPUs
- * @insert_list: output, the ranges will be inserted into svms, attributes are
- *               not changes. For set_attr, this will add into svms.
- * @remove_list:output, the ranges will be removed from svms
- * @left: the remaining range after overlap, For set_attr, this will be added
- *        as new range.
+ * svm_range_add - add svm range and handle overlap
+ * @p: the range add to this process svms
+ * @start: page size aligned
+ * @size: page size aligned
+ * @nattr: number of attributes
+ * @attrs: array of attributes
+ * @update_list: output, the ranges need validate and update GPU mapping
+ * @insert_list: output, the ranges need insert to svms
+ * @remove_list: output, the ranges are replaced and need remove from svms
  *
- * Total have 5 overlap cases.
+ * Check if the virtual address range has overlap with any existing ranges,
+ * split partly overlapping ranges and add new ranges in the gaps. All changes
+ * should be applied to the range_list and interval tree transactionally. If
+ * any range split or allocation fails, the entire update fails. Therefore any
+ * existing overlapping svm_ranges are cloned and the original svm_ranges left
+ * unchanged.
  *
- * This function handles overlap of an address interval with existing
- * struct svm_ranges for applying new attributes. This may require
- * splitting existing struct svm_ranges. All changes should be applied to
- * the range_list and interval tree transactionally. If any split operation
- * fails, the entire update fails. Therefore the existing overlapping
- * svm_ranges are cloned and the original svm_ranges left unchanged. If the
- * transaction succeeds, the modified clones are added and the originals
- * freed. Otherwise the clones are removed and the old svm_ranges remain.
+ * If the transaction succeeds, the caller can update and insert clones and
+ * new ranges, then free the originals.
  *
- * Context: The caller must hold svms->lock
+ * Otherwise the caller can free the clones and new ranges, while the old
+ * svm_ranges remain unchanged.
+ *
+ * Context: Process context, caller must hold svms->lock
+ *
+ * Return:
+ * 0 - OK, otherwise error code
  */
 static int
-svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new,
-			 unsigned long start, unsigned long last,
-			 struct list_head *update_list,
-			 struct list_head *insert_list,
-			 struct list_head *remove_list,
-			 unsigned long *left)
+svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
+	      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
+	      struct list_head *update_list, struct list_head *insert_list,
+	      struct list_head *remove_list)
 {
+	unsigned long last = start + size - 1UL;
+	struct svm_range_list *svms = &p->svms;
 	struct interval_tree_node *node;
 	struct svm_range *prange;
 	struct svm_range *tmp;
 	int r = 0;
 
+	pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last);
+
 	INIT_LIST_HEAD(update_list);
 	INIT_LIST_HEAD(insert_list);
 	INIT_LIST_HEAD(remove_list);
@@ -1815,18 +1851,24 @@ svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new,
 	node = interval_tree_iter_first(&svms->objects, start, last);
 	while (node) {
 		struct interval_tree_node *next;
-		struct svm_range *old;
 		unsigned long next_start;
 
 		pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start,
 			 node->last);
 
-		old = container_of(node, struct svm_range, it_node);
+		prange = container_of(node, struct svm_range, it_node);
 		next = interval_tree_iter_next(node, start, last);
 		next_start = min(node->last, last) + 1;
 
-		if (node->start < start || node->last > last) {
-			/* node intersects the updated range, clone+split it */
+		if (svm_range_is_same_attrs(p, prange, nattr, attrs)) {
+			/* nothing to do */
+		} else if (node->start < start || node->last > last) {
+			/* node intersects the update range and its attributes
+			 * will change. Clone and split it, apply updates only
+			 * to the overlapping part
+			 */
+			struct svm_range *old = prange;
+
 			prange = svm_range_clone(old);
 			if (!prange) {
 				r = -ENOMEM;
@@ -1835,17 +1877,18 @@ svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new,
 
 			list_add(&old->remove_list, remove_list);
 			list_add(&prange->insert_list, insert_list);
+			list_add(&prange->update_list, update_list);
 
 			if (node->start < start) {
 				pr_debug("change old range start\n");
-				r = svm_range_split_head(prange, new, start,
+				r = svm_range_split_head(prange, start,
 							 insert_list);
 				if (r)
 					goto out;
 			}
 			if (node->last > last) {
 				pr_debug("change old range last\n");
-				r = svm_range_split_tail(prange, new, last,
+				r = svm_range_split_tail(prange, last,
 							 insert_list);
 				if (r)
 					goto out;
@@ -1854,16 +1897,12 @@ svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new,
 			/* The node is contained within start..last,
 			 * just update it
 			 */
-			prange = old;
-		}
-
-		if (!svm_range_is_same_attrs(prange, new))
 			list_add(&prange->update_list, update_list);
+		}
 
 		/* insert a new node if needed */
 		if (node->start > start) {
-			prange = svm_range_new(prange->svms, start,
-					       node->start - 1);
+			prange = svm_range_new(svms, start, node->start - 1);
 			if (!prange) {
 				r = -ENOMEM;
 				goto out;
@@ -1877,8 +1916,16 @@ svm_range_handle_overlap(struct svm_range_list *svms, struct svm_range *new,
 		start = next_start;
 	}
 
-	if (left && start <= last)
-		*left = last - start + 1;
+	/* add a final range at the end if needed */
+	if (start <= last) {
+		prange = svm_range_new(svms, start, last);
+		if (!prange) {
+			r = -ENOMEM;
+			goto out;
+		}
+		list_add(&prange->insert_list, insert_list);
+		list_add(&prange->update_list, update_list);
+	}
 
 out:
 	if (r)
@@ -1966,23 +2013,30 @@ svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange)
 static void svm_range_drain_retry_fault(struct svm_range_list *svms)
 {
 	struct kfd_process_device *pdd;
-	struct amdgpu_device *adev;
 	struct kfd_process *p;
+	int drain;
 	uint32_t i;
 
 	p = container_of(svms, struct kfd_process, svms);
 
+restart:
+	drain = atomic_read(&svms->drain_pagefaults);
+	if (!drain)
+		return;
+
 	for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
 		pdd = p->pdds[i];
 		if (!pdd)
 			continue;
 
 		pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
-		adev = (struct amdgpu_device *)pdd->dev->kgd;
 
-		amdgpu_ih_wait_on_checkpoint_process(adev, &adev->irq.ih1);
+		amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
+							&pdd->dev->adev->irq.ih1);
 		pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
 	}
+	if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
+		goto restart;
 }
 
 static void svm_range_deferred_list_work(struct work_struct *work)
@@ -1990,43 +2044,41 @@ static void svm_range_deferred_list_work(struct work_struct *work)
 	struct svm_range_list *svms;
 	struct svm_range *prange;
 	struct mm_struct *mm;
+	struct kfd_process *p;
 
 	svms = container_of(work, struct svm_range_list, deferred_list_work);
 	pr_debug("enter svms 0x%p\n", svms);
 
+	p = container_of(svms, struct kfd_process, svms);
+	/* Avoid mm is gone when inserting mmu notifier */
+	mm = get_task_mm(p->lead_thread);
+	if (!mm) {
+		pr_debug("svms 0x%p process mm gone\n", svms);
+		return;
+	}
+retry:
+	mmap_write_lock(mm);
+
+	/* Checking for the need to drain retry faults must be inside
+	 * mmap write lock to serialize with munmap notifiers.
+	 */
+	if (unlikely(atomic_read(&svms->drain_pagefaults))) {
+		mmap_write_unlock(mm);
+		svm_range_drain_retry_fault(svms);
+		goto retry;
+	}
+
 	spin_lock(&svms->deferred_list_lock);
 	while (!list_empty(&svms->deferred_range_list)) {
 		prange = list_first_entry(&svms->deferred_range_list,
 					  struct svm_range, deferred_list);
+		list_del_init(&prange->deferred_list);
 		spin_unlock(&svms->deferred_list_lock);
+
 		pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
 			 prange->start, prange->last, prange->work_item.op);
 
-		mm = prange->work_item.mm;
-retry:
-		mmap_write_lock(mm);
 		mutex_lock(&svms->lock);
-
-		/* Checking for the need to drain retry faults must be in
-		 * mmap write lock to serialize with munmap notifiers.
-		 *
-		 * Remove from deferred_list must be inside mmap write lock,
-		 * otherwise, svm_range_list_lock_and_flush_work may hold mmap
-		 * write lock, and continue because deferred_list is empty, then
-		 * deferred_list handle is blocked by mmap write lock.
-		 */
-		spin_lock(&svms->deferred_list_lock);
-		if (unlikely(svms->drain_pagefaults)) {
-			svms->drain_pagefaults = false;
-			spin_unlock(&svms->deferred_list_lock);
-			mutex_unlock(&svms->lock);
-			mmap_write_unlock(mm);
-			svm_range_drain_retry_fault(svms);
-			goto retry;
-		}
-		list_del_init(&prange->deferred_list);
-		spin_unlock(&svms->deferred_list_lock);
-
 		mutex_lock(&prange->migrate_mutex);
 		while (!list_empty(&prange->child_list)) {
 			struct svm_range *pchild;
@@ -2042,12 +2094,13 @@ retry:
 
 		svm_range_handle_list_op(svms, prange);
 		mutex_unlock(&svms->lock);
-		mmap_write_unlock(mm);
 
 		spin_lock(&svms->deferred_list_lock);
 	}
 	spin_unlock(&svms->deferred_list_lock);
 
+	mmap_write_unlock(mm);
+	mmput(mm);
 	pr_debug("exit svms 0x%p\n", svms);
 }
 
@@ -2056,12 +2109,6 @@ svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
 			struct mm_struct *mm, enum svm_work_list_ops op)
 {
 	spin_lock(&svms->deferred_list_lock);
-	/* Make sure pending page faults are drained in the deferred worker
-	 * before the range is freed to avoid straggler interrupts on
-	 * unmapped memory causing "phantom faults".
-	 */
-	if (op == SVM_OP_UNMAP_RANGE)
-		svms->drain_pagefaults = true;
 	/* if prange is on the deferred list */
 	if (!list_empty(&prange->deferred_list)) {
 		pr_debug("update exist prange 0x%p work op %d\n", prange, op);
@@ -2140,6 +2187,12 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
 		 prange, prange->start, prange->last, start, last);
 
+	/* Make sure pending page faults are drained in the deferred worker
+	 * before the range is freed to avoid straggler interrupts on
+	 * unmapped memory causing "phantom faults".
+	 */
+	atomic_inc(&svms->drain_pagefaults);
+
 	unmap_parent = start <= prange->start && last >= prange->last;
 
 	list_for_each_entry(pchild, &prange->child_list, child_list) {
@@ -2169,6 +2222,9 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
 
 /**
  * svm_range_cpu_invalidate_pagetables - interval notifier callback
+ * @mni: mmu_interval_notifier struct
+ * @range: mmu_notifier_range struct
+ * @cur_seq: value to pass to mmu_interval_set_seq()
  *
  * If event is MMU_NOTIFY_UNMAP, this is from CPU unmap range, otherwise, it
  * is from migration, or CPU page invalidation callback.
@@ -2301,7 +2357,7 @@ svm_range_best_restore_location(struct svm_range *prange,
 
 	p = container_of(prange->svms, struct kfd_process, svms);
 
-	r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, gpuidx);
+	r = kfd_process_gpuid_from_adev(p, adev, &gpuid, gpuidx);
 	if (r < 0) {
 		pr_debug("failed to get gpuid from kgd\n");
 		return -1;
@@ -2478,7 +2534,7 @@ svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
 		pr_debug("Failed to create prange in address [0x%llx]\n", addr);
 		return NULL;
 	}
-	if (kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx)) {
+	if (kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx)) {
 		pr_debug("failed to get gpuid from kgd\n");
 		svm_range_free(prange);
 		return NULL;
@@ -2545,7 +2601,7 @@ svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
 		uint32_t gpuid;
 		int r;
 
-		r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx);
+		r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx);
 		if (r < 0)
 			return;
 	}
@@ -2559,20 +2615,13 @@ svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
 }
 
 static bool
-svm_fault_allowed(struct mm_struct *mm, uint64_t addr, bool write_fault)
+svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
 {
 	unsigned long requested = VM_READ;
-	struct vm_area_struct *vma;
 
 	if (write_fault)
 		requested |= VM_WRITE;
 
-	vma = find_vma(mm, addr << PAGE_SHIFT);
-	if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
-		pr_debug("address 0x%llx VMA is removed\n", addr);
-		return true;
-	}
-
 	pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
 		vma->vm_flags);
 	return (vma->vm_flags & requested) == requested;
@@ -2590,6 +2639,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
 	int32_t best_loc;
 	int32_t gpuidx = MAX_GPU_INSTANCE;
 	bool write_locked = false;
+	struct vm_area_struct *vma;
 	int r = 0;
 
 	if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) {
@@ -2600,7 +2650,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
 	p = kfd_lookup_process_by_pasid(pasid);
 	if (!p) {
 		pr_debug("kfd process not founded pasid 0x%x\n", pasid);
-		return -ESRCH;
+		return 0;
 	}
 	if (!p->xnack_enabled) {
 		pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
@@ -2611,10 +2661,19 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
 
 	pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
 
+	if (atomic_read(&svms->drain_pagefaults)) {
+		pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
+		r = 0;
+		goto out;
+	}
+
+	/* p->lead_thread is available as kfd_process_wq_release flush the work
+	 * before releasing task ref.
+	 */
 	mm = get_task_mm(p->lead_thread);
 	if (!mm) {
 		pr_debug("svms 0x%p failed to get mm\n", svms);
-		r = -ESRCH;
+		r = 0;
 		goto out;
 	}
 
@@ -2652,6 +2711,7 @@ retry_write_locked:
 
 	if (svm_range_skip_recover(prange)) {
 		amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
+		r = 0;
 		goto out_unlock_range;
 	}
 
@@ -2660,10 +2720,21 @@ retry_write_locked:
 	if (timestamp < AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING) {
 		pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
 			 svms, prange->start, prange->last);
+		r = 0;
+		goto out_unlock_range;
+	}
+
+	/* __do_munmap removed VMA, return success as we are handling stale
+	 * retry fault.
+	 */
+	vma = find_vma(mm, addr << PAGE_SHIFT);
+	if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
+		pr_debug("address 0x%llx VMA is removed\n", addr);
+		r = 0;
 		goto out_unlock_range;
 	}
 
-	if (!svm_fault_allowed(mm, addr, write_fault)) {
+	if (!svm_fault_allowed(vma, write_fault)) {
 		pr_debug("fault addr 0x%llx no %s permission\n", addr,
 			write_fault ? "write" : "read");
 		r = -EPERM;
@@ -2741,6 +2812,14 @@ void svm_range_list_fini(struct kfd_process *p)
 	/* Ensure list work is finished before process is destroyed */
 	flush_work(&p->svms.deferred_list_work);
 
+	/*
+	 * Ensure no retry fault comes in afterwards, as page fault handler will
+	 * not find kfd process and take mm lock to recover fault.
+	 */
+	atomic_inc(&p->svms.drain_pagefaults);
+	svm_range_drain_retry_fault(&p->svms);
+
+
 	list_for_each_entry_safe(prange, next, &p->svms.list, list) {
 		svm_range_unlink(prange);
 		svm_range_remove_notifier(prange);
@@ -2761,6 +2840,7 @@ int svm_range_list_init(struct kfd_process *p)
 	mutex_init(&svms->lock);
 	INIT_LIST_HEAD(&svms->list);
 	atomic_set(&svms->evicted_ranges, 0);
+	atomic_set(&svms->drain_pagefaults, 0);
 	INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
 	INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
 	INIT_LIST_HEAD(&svms->deferred_range_list);
@@ -2868,59 +2948,6 @@ svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
 }
 
 /**
- * svm_range_add - add svm range and handle overlap
- * @p: the range add to this process svms
- * @start: page size aligned
- * @size: page size aligned
- * @nattr: number of attributes
- * @attrs: array of attributes
- * @update_list: output, the ranges need validate and update GPU mapping
- * @insert_list: output, the ranges need insert to svms
- * @remove_list: output, the ranges are replaced and need remove from svms
- *
- * Check if the virtual address range has overlap with the registered ranges,
- * split the overlapped range, copy and adjust pages address and vram nodes in
- * old and new ranges.
- *
- * Context: Process context, caller must hold svms->lock
- *
- * Return:
- * 0 - OK, otherwise error code
- */
-static int
-svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
-	      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
-	      struct list_head *update_list, struct list_head *insert_list,
-	      struct list_head *remove_list)
-{
-	uint64_t last = start + size - 1UL;
-	struct svm_range_list *svms;
-	struct svm_range new = {0};
-	struct svm_range *prange;
-	unsigned long left = 0;
-	int r = 0;
-
-	pr_debug("svms 0x%p [0x%llx 0x%llx]\n", &p->svms, start, last);
-
-	svm_range_apply_attrs(p, &new, nattr, attrs);
-
-	svms = &p->svms;
-
-	r = svm_range_handle_overlap(svms, &new, start, last, update_list,
-				     insert_list, remove_list, &left);
-	if (r)
-		return r;
-
-	if (left) {
-		prange = svm_range_new(svms, last - left + 1, last);
-		list_add(&prange->insert_list, insert_list);
-		list_add(&prange->update_list, update_list);
-	}
-
-	return 0;
-}
-
-/**
  * svm_range_best_prefetch_location - decide the best prefetch location
  * @prange: svm range structure
  *
@@ -2953,7 +2980,6 @@ svm_range_best_prefetch_location(struct svm_range *prange)
 	uint32_t best_loc = prange->prefetch_loc;
 	struct kfd_process_device *pdd;
 	struct amdgpu_device *bo_adev;
-	struct amdgpu_device *adev;
 	struct kfd_process *p;
 	uint32_t gpuidx;
 
@@ -2981,12 +3007,11 @@ svm_range_best_prefetch_location(struct svm_range *prange)
 			pr_debug("failed to get device by idx 0x%x\n", gpuidx);
 			continue;
 		}
-		adev = (struct amdgpu_device *)pdd->dev->kgd;
 
-		if (adev == bo_adev)
+		if (pdd->dev->adev == bo_adev)
 			continue;
 
-		if (!amdgpu_xgmi_same_hive(adev, bo_adev)) {
+		if (!amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
 			best_loc = 0;
 			break;
 		}
@@ -3150,7 +3175,6 @@ static int
 svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
 		   uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
 {
-	struct amdkfd_process_info *process_info = p->kgd_process_info;
 	struct mm_struct *mm = current->mm;
 	struct list_head update_list;
 	struct list_head insert_list;
@@ -3169,8 +3193,6 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
 
 	svms = &p->svms;
 
-	mutex_lock(&process_info->lock);
-
 	svm_range_list_lock_and_flush_work(svms, mm);
 
 	r = svm_range_is_valid(p, start, size);
@@ -3246,8 +3268,6 @@ out_unlock_range:
 	mutex_unlock(&svms->lock);
 	mmap_read_unlock(mm);
 out:
-	mutex_unlock(&process_info->lock);
-
 	pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
 		 &p->svms, start, start + size - 1, r);
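
The drain_pagefaults rework in this patch turns a racy boolean into an atomic counter: producers (svm_range_unmap_from_cpu, svm_range_list_fini) atomic_inc() it to request a drain, and svm_range_drain_retry_fault() snapshots the count, drains, then clears it with atomic_cmpxchg() only if no new request arrived meanwhile, restarting otherwise. A minimal userspace sketch of that snapshot-and-cmpxchg loop, assuming C11 <stdatomic.h>; the names drain_requests and drain_one_pass are illustrative, not the kernel's:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int drain_requests;

/* Stand-in for waiting on the IH ring checkpoint of one GPU. */
static void drain_one_pass(void)
{
	printf("draining pending retry faults\n");
}

static void drain_retry_fault(void)
{
	int drain;

restart:
	drain = atomic_load(&drain_requests);
	if (!drain)
		return;

	drain_one_pass();

	/* Clear the counter only if no new drain request arrived while we
	 * were draining; otherwise run another pass so none is lost.
	 */
	if (!atomic_compare_exchange_strong(&drain_requests, &drain, 0))
		goto restart;
}

int main(void)
{
	atomic_fetch_add(&drain_requests, 1);	/* e.g. from an unmap path */
	drain_retry_fault();
	return 0;
}

Because the counter is only zeroed when the snapshot still matches, a request that races with an in-flight drain is never silently dropped; the worst case is one extra pass.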
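The reworked svm_range_add() also changes how the address interval is covered: overlapping ranges are visited in address order, ranges whose attributes already match are skipped, partial overlaps are cloned and split, and brand-new ranges are created only for the gaps between overlaps, plus one final range past the last overlap. A self-contained sketch of just that gap-filling walk, with a sorted array standing in for the kernel's interval tree and all names illustrative:

#include <stdint.h>
#include <stdio.h>

struct interval {
	uint64_t start, last;
};

static void stage_new_range(uint64_t start, uint64_t last)
{
	printf("new range [0x%llx 0x%llx]\n",
	       (unsigned long long)start, (unsigned long long)last);
}

static void add_range(uint64_t start, uint64_t last,
		      const struct interval *overlaps, int n)
{
	for (int i = 0; i < n && start <= last; i++) {
		/* a gap before this overlap becomes a new range */
		if (overlaps[i].start > start)
			stage_new_range(start, overlaps[i].start - 1);
		/* the existing overlap itself is cloned/split/updated here */
		start = (overlaps[i].last < last ? overlaps[i].last : last) + 1;
	}
	/* add a final range at the end if needed */
	if (start <= last)
		stage_new_range(start, last);
}

int main(void)
{
	/* overlaps sorted by start, as the interval tree iterator yields */
	const struct interval overlaps[] = {
		{ 0x100, 0x1ff },
		{ 0x400, 0x4ff },
	};

	/* expected gaps: [0x0 0xff], [0x200 0x3ff], [0x500 0x7ff] */
	add_range(0x0, 0x7ff, overlaps, 2);
	return 0;
}

In the kernel, all of these new and cloned ranges go on insert_list/update_list and nothing touches the live interval tree until the whole transaction has succeeded, which is why a mid-way -ENOMEM can abort cleanly.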