Diffstat:
 drivers/dma-buf/udmabuf.c | 275 ++++++++++++++++++++++-----------
 1 file changed, 168 insertions(+), 107 deletions(-)
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 047c3cd2ceff..8ce1f074c2d3 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -27,15 +27,21 @@ MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is
 struct udmabuf {
        pgoff_t pagecount;
        struct folio **folios;
+
+       /*
+        * Unlike folios, pinned_folios is only used for unpinning.
+        * So nr_pinned is not the same as pagecount: pinned_folios
+        * records each folio that was pinned during udmabuf_create().
+        * Note that, since a folio may be pinned multiple times, it can
+        * be added to pinned_folios multiple times, once for each time
+        * it was pinned at creation.
+        */
+       pgoff_t nr_pinned;
+       struct folio **pinned_folios;
+
        struct sg_table *sg;
        struct miscdevice *device;
        pgoff_t *offsets;
-       struct list_head unpin_list;
-};
-
-struct udmabuf_folio {
-       struct folio *folio;
-       struct list_head list;
 };
 
 static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
@@ -43,7 +49,8 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
        struct vm_area_struct *vma = vmf->vma;
        struct udmabuf *ubuf = vma->vm_private_data;
        pgoff_t pgoff = vmf->pgoff;
-       unsigned long pfn;
+       unsigned long addr, pfn;
+       vm_fault_t ret;
 
        if (pgoff >= ubuf->pagecount)
                return VM_FAULT_SIGBUS;
@@ -51,7 +58,35 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
        pfn = folio_pfn(ubuf->folios[pgoff]);
        pfn += ubuf->offsets[pgoff] >> PAGE_SHIFT;
 
-       return vmf_insert_pfn(vma, vmf->address, pfn);
+       ret = vmf_insert_pfn(vma, vmf->address, pfn);
+       if (ret & VM_FAULT_ERROR)
+               return ret;
+
+       /* Pre-fault the remaining pages in the VMA. */
+       pgoff = vma->vm_pgoff;
+       addr = vma->vm_start;
+
+       for (; addr < vma->vm_end; pgoff++, addr += PAGE_SIZE) {
+               if (addr == vmf->address)
+                       continue;
+
+               if (WARN_ON(pgoff >= ubuf->pagecount))
+                       break;
+
+               pfn = folio_pfn(ubuf->folios[pgoff]);
+               pfn += ubuf->offsets[pgoff] >> PAGE_SHIFT;
+
+               /*
+                * If vmf_insert_pfn() fails below, do not return an error
+                * from this pre-fault step; the error will be reported
+                * when the failing address is actually accessed and
+                * faults again.
+                */
+               if (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR)
+                       break;
+       }
+
+       return ret;
 }
 
 static const struct vm_operations_struct udmabuf_vm_ops = {
@@ -74,21 +109,29 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
 static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
 {
        struct udmabuf *ubuf = buf->priv;
-       struct page **pages;
+       unsigned long *pfns;
        void *vaddr;
        pgoff_t pg;
 
        dma_resv_assert_held(buf->resv);
 
-       pages = kmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL);
-       if (!pages)
+       /*
+        * HVO may have freed the tail pages, so use pfns rather than
+        * struct pages to map each folio into the vmalloc area.
+        */
+       pfns = kvmalloc_array(ubuf->pagecount, sizeof(*pfns), GFP_KERNEL);
+       if (!pfns)
                return -ENOMEM;
 
-       for (pg = 0; pg < ubuf->pagecount; pg++)
-               pages[pg] = &ubuf->folios[pg]->page;
+       for (pg = 0; pg < ubuf->pagecount; pg++) {
+               unsigned long pfn = folio_pfn(ubuf->folios[pg]);
+
+               pfn += ubuf->offsets[pg] >> PAGE_SHIFT;
+               pfns[pg] = pfn;
+       }
 
-       vaddr = vm_map_ram(pages, ubuf->pagecount, -1);
-       kfree(pages);
+       vaddr = vmap_pfn(pfns, ubuf->pagecount, PAGE_KERNEL);
+       kvfree(pfns);
        if (!vaddr)
                return -EINVAL;
@@ -159,34 +202,42 @@ static void unmap_udmabuf(struct dma_buf_attachment *at,
        return put_sg_table(at->dev, sg, direction);
 }
 
-static void unpin_all_folios(struct list_head *unpin_list)
+static void unpin_all_folios(struct udmabuf *ubuf)
 {
-       struct udmabuf_folio *ubuf_folio;
+       pgoff_t i;
 
-       while (!list_empty(unpin_list)) {
-               ubuf_folio = list_first_entry(unpin_list,
-                                             struct udmabuf_folio, list);
-               unpin_folio(ubuf_folio->folio);
+       for (i = 0; i < ubuf->nr_pinned; ++i)
+               unpin_folio(ubuf->pinned_folios[i]);
 
-               list_del(&ubuf_folio->list);
-               kfree(ubuf_folio);
-       }
+       kvfree(ubuf->pinned_folios);
 }
 
-static int add_to_unpin_list(struct list_head *unpin_list,
-                            struct folio *folio)
+static __always_inline int init_udmabuf(struct udmabuf *ubuf, pgoff_t pgcnt)
 {
-       struct udmabuf_folio *ubuf_folio;
+       ubuf->folios = kvmalloc_array(pgcnt, sizeof(*ubuf->folios), GFP_KERNEL);
+       if (!ubuf->folios)
+               return -ENOMEM;
 
-       ubuf_folio = kzalloc(sizeof(*ubuf_folio), GFP_KERNEL);
-       if (!ubuf_folio)
+       ubuf->offsets = kvcalloc(pgcnt, sizeof(*ubuf->offsets), GFP_KERNEL);
+       if (!ubuf->offsets)
+               return -ENOMEM;
+
+       ubuf->pinned_folios = kvmalloc_array(pgcnt,
+                                            sizeof(*ubuf->pinned_folios),
+                                            GFP_KERNEL);
+       if (!ubuf->pinned_folios)
                return -ENOMEM;
 
-       ubuf_folio->folio = folio;
-       list_add_tail(&ubuf_folio->list, unpin_list);
        return 0;
 }
 
+static __always_inline void deinit_udmabuf(struct udmabuf *ubuf)
+{
+       unpin_all_folios(ubuf);
+       kvfree(ubuf->offsets);
+       kvfree(ubuf->folios);
+}
+
 static void release_udmabuf(struct dma_buf *buf)
 {
        struct udmabuf *ubuf = buf->priv;
@@ -195,9 +246,7 @@ static void release_udmabuf(struct dma_buf *buf)
        if (ubuf->sg)
                put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);
 
-       unpin_all_folios(&ubuf->unpin_list);
-       kfree(ubuf->offsets);
-       kfree(ubuf->folios);
+       deinit_udmabuf(ubuf);
        kfree(ubuf);
 }
 
@@ -254,9 +303,6 @@ static int check_memfd_seals(struct file *memfd)
 {
        int seals;
 
-       if (!memfd)
-               return -EBADFD;
-
        if (!shmem_file(memfd) && !is_file_hugepages(memfd))
                return -EBADFD;
 
@@ -291,100 +337,116 @@ static int export_udmabuf(struct udmabuf *ubuf,
        return dma_buf_fd(buf, flags);
 }
 
+static long udmabuf_pin_folios(struct udmabuf *ubuf, struct file *memfd,
+                              loff_t start, loff_t size, struct folio **folios)
+{
+       pgoff_t nr_pinned = ubuf->nr_pinned;
+       pgoff_t upgcnt = ubuf->pagecount;
+       u32 cur_folio, cur_pgcnt;
+       pgoff_t pgoff, pgcnt;
+       long nr_folios;
+       loff_t end;
+
+       pgcnt = size >> PAGE_SHIFT;
+       end = start + (pgcnt << PAGE_SHIFT) - 1;
+       nr_folios = memfd_pin_folios(memfd, start, end, folios, pgcnt, &pgoff);
+       if (nr_folios <= 0)
+               return nr_folios ? nr_folios : -EINVAL;
+
+       cur_pgcnt = 0;
+       for (cur_folio = 0; cur_folio < nr_folios; ++cur_folio) {
+               pgoff_t subpgoff = pgoff;
+               size_t fsize = folio_size(folios[cur_folio]);
+
+               ubuf->pinned_folios[nr_pinned++] = folios[cur_folio];
+
+               for (; subpgoff < fsize; subpgoff += PAGE_SIZE) {
+                       ubuf->folios[upgcnt] = folios[cur_folio];
+                       ubuf->offsets[upgcnt] = subpgoff;
+                       ++upgcnt;
+
+                       if (++cur_pgcnt >= pgcnt)
+                               goto end;
+               }
+
+               /*
+                * In a given range, only the first subpage of the first
+                * folio has a non-zero offset, which is returned by
+                * memfd_pin_folios(). The first subpages of all later
+                * folios in the range have an offset of 0.
+                */
+               pgoff = 0;
+       }
+end:
+       ubuf->pagecount = upgcnt;
+       ubuf->nr_pinned = nr_pinned;
+       return 0;
+}
+
 static long udmabuf_create(struct miscdevice *device,
                           struct udmabuf_create_list *head,
                           struct udmabuf_create_item *list)
 {
-       pgoff_t pgoff, pgcnt, pglimit, pgbuf = 0;
-       long nr_folios, ret = -EINVAL;
-       struct file *memfd = NULL;
-       struct folio **folios;
+       unsigned long max_nr_folios = 0;
+       struct folio **folios = NULL;
+       pgoff_t pgcnt = 0, pglimit;
        struct udmabuf *ubuf;
-       u32 i, j, k, flags;
-       loff_t end;
+       long ret = -EINVAL;
+       u32 i, flags;
 
        ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
        if (!ubuf)
                return -ENOMEM;
 
-       INIT_LIST_HEAD(&ubuf->unpin_list);
        pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
        for (i = 0; i < head->count; i++) {
-               if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
-                       goto err;
-               if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
-                       goto err;
-               ubuf->pagecount += list[i].size >> PAGE_SHIFT;
-               if (ubuf->pagecount > pglimit)
-                       goto err;
+               pgoff_t subpgcnt;
+
+               if (!PAGE_ALIGNED(list[i].offset))
+                       goto err_noinit;
+               if (!PAGE_ALIGNED(list[i].size))
+                       goto err_noinit;
+
+               subpgcnt = list[i].size >> PAGE_SHIFT;
+               pgcnt += subpgcnt;
+               if (pgcnt > pglimit)
+                       goto err_noinit;
+
+               max_nr_folios = max_t(unsigned long, subpgcnt, max_nr_folios);
        }
 
-       if (!ubuf->pagecount)
-               goto err;
+       if (!pgcnt)
+               goto err_noinit;
 
-       ubuf->folios = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->folios),
-                                    GFP_KERNEL);
-       if (!ubuf->folios) {
-               ret = -ENOMEM;
+       ret = init_udmabuf(ubuf, pgcnt);
+       if (ret)
                goto err;
-       }
-       ubuf->offsets = kcalloc(ubuf->pagecount, sizeof(*ubuf->offsets),
-                               GFP_KERNEL);
-       if (!ubuf->offsets) {
+
+       folios = kvmalloc_array(max_nr_folios, sizeof(*folios), GFP_KERNEL);
+       if (!folios) {
                ret = -ENOMEM;
                goto err;
        }
 
-       pgbuf = 0;
        for (i = 0; i < head->count; i++) {
-               memfd = fget(list[i].memfd);
-               ret = check_memfd_seals(memfd);
-               if (ret < 0)
-                       goto err;
+               struct file *memfd = fget(list[i].memfd);
 
-               pgcnt = list[i].size >> PAGE_SHIFT;
-               folios = kmalloc_array(pgcnt, sizeof(*folios), GFP_KERNEL);
-               if (!folios) {
-                       ret = -ENOMEM;
+               if (!memfd) {
+                       ret = -EBADFD;
                        goto err;
                }
 
-               end = list[i].offset + (pgcnt << PAGE_SHIFT) - 1;
-               ret = memfd_pin_folios(memfd, list[i].offset, end,
-                                      folios, pgcnt, &pgoff);
-               if (ret <= 0) {
-                       kfree(folios);
-                       if (!ret)
-                               ret = -EINVAL;
+               ret = check_memfd_seals(memfd);
+               if (ret < 0) {
+                       fput(memfd);
                        goto err;
                }
 
-               nr_folios = ret;
-               pgoff >>= PAGE_SHIFT;
-               for (j = 0, k = 0; j < pgcnt; j++) {
-                       ubuf->folios[pgbuf] = folios[k];
-                       ubuf->offsets[pgbuf] = pgoff << PAGE_SHIFT;
-
-                       if (j == 0 || ubuf->folios[pgbuf-1] != folios[k]) {
-                               ret = add_to_unpin_list(&ubuf->unpin_list,
-                                                       folios[k]);
-                               if (ret < 0) {
-                                       kfree(folios);
-                                       goto err;
-                               }
-                       }
-
-                       pgbuf++;
-                       if (++pgoff == folio_nr_pages(folios[k])) {
-                               pgoff = 0;
-                               if (++k == nr_folios)
-                                       break;
-                       }
-               }
-
-               kfree(folios);
+               ret = udmabuf_pin_folios(ubuf, memfd, list[i].offset,
+                                        list[i].size, folios);
                fput(memfd);
-               memfd = NULL;
+               if (ret)
+                       goto err;
        }
 
        flags = head->flags & UDMABUF_FLAGS_CLOEXEC ? O_CLOEXEC : 0;
@@ -392,15 +454,14 @@ static long udmabuf_create(struct miscdevice *device,
        if (ret < 0)
                goto err;
 
+       kvfree(folios);
        return ret;
 
 err:
-       if (memfd)
-               fput(memfd);
-       unpin_all_folios(&ubuf->unpin_list);
-       kfree(ubuf->offsets);
-       kfree(ubuf->folios);
+       deinit_udmabuf(ubuf);
+err_noinit:
        kfree(ubuf);
+       kvfree(folios);
        return ret;
 }
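
For reference, the creation path reworked above is driven from userspace through the UDMABUF_CREATE ioctl on /dev/udmabuf. Below is a minimal, untested sketch of that flow, assuming only the standard <linux/udmabuf.h> uapi; the buffer name, the 1 MiB size, and the omitted error handling are illustrative, not part of the patch. Note that check_memfd_seals() requires F_SEAL_SHRINK to be set and F_SEAL_WRITE to be absent on the memfd:

#define _GNU_SOURCE
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/udmabuf.h>

int main(void)
{
	const size_t size = 1024 * 1024;	/* illustrative; must be page-aligned */
	struct udmabuf_create create = { 0 };
	int memfd, devfd, buffd;
	void *p;

	/* Back the buffer with a sealable memfd. */
	memfd = memfd_create("udmabuf-src", MFD_ALLOW_SEALING);
	ftruncate(memfd, size);
	/* udmabuf requires F_SEAL_SHRINK and rejects F_SEAL_WRITE. */
	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);

	devfd = open("/dev/udmabuf", O_RDWR);
	create.memfd  = memfd;
	create.flags  = UDMABUF_FLAGS_CLOEXEC;
	create.offset = 0;		/* must be page-aligned */
	create.size   = size;		/* must be page-aligned */
	buffd = ioctl(devfd, UDMABUF_CREATE, &create);	/* returns a dma-buf fd */

	/* Touching the mapping hits udmabuf_vm_fault(); with this patch,
	 * the first fault also pre-faults the rest of the VMA. */
	p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, buffd, 0);
	memset(p, 0xaa, size);

	munmap(p, size);
	close(buffd);
	close(devfd);
	close(memfd);
	return 0;
}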
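Multiple page-aligned ranges, possibly from different memfds, can be merged into one buffer with UDMABUF_CREATE_LIST; each list entry corresponds to one udmabuf_pin_folios() call in the loop above. Another hedged sketch, again assuming only the standard uapi: the helper name and range layout are invented for illustration, and the memfd is assumed to already carry F_SEAL_SHRINK as in the previous example:

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/udmabuf.h>

/* Hypothetical helper: combine two ranges of one memfd into a single udmabuf. */
int create_from_two_ranges(int devfd, int memfd, unsigned long pgsz)
{
	struct udmabuf_create_list *list;
	int buffd;

	list = calloc(1, sizeof(*list) + 2 * sizeof(struct udmabuf_create_item));
	if (!list)
		return -1;

	list->flags = UDMABUF_FLAGS_CLOEXEC;
	list->count = 2;
	/* Offset and size must be page-aligned, or udmabuf_create()
	 * fails its PAGE_ALIGNED() checks with -EINVAL. */
	list->list[0] = (struct udmabuf_create_item)
		{ .memfd = memfd, .offset = 0,        .size = 4 * pgsz };
	list->list[1] = (struct udmabuf_create_item)
		{ .memfd = memfd, .offset = 8 * pgsz, .size = 4 * pgsz };

	buffd = ioctl(devfd, UDMABUF_CREATE_LIST, list);
	free(list);
	return buffd;	/* dma-buf fd on success, negative on error */
}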