Diffstat (limited to 'drivers/gpu/drm/drm_prime.c')
 drivers/gpu/drm/drm_prime.c | 104 ++++++++++++++++++++++----------------
 1 file changed, 62 insertions(+), 42 deletions(-)
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 1693aa7c14b5..11fe9ff76fd5 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -617,6 +617,7 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
{
struct drm_gem_object *obj = attach->dmabuf->priv;
struct sg_table *sgt;
+ int ret;
if (WARN_ON(dir == DMA_NONE))
return ERR_PTR(-EINVAL);
@@ -626,11 +627,12 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
else
sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
- if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
- DMA_ATTR_SKIP_CPU_SYNC)) {
+ ret = dma_map_sgtable(attach->dev, sgt, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (ret) {
sg_free_table(sgt);
kfree(sgt);
- sgt = ERR_PTR(-ENOMEM);
+ sgt = ERR_PTR(ret);
}
return sgt;
@@ -652,8 +654,7 @@ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
if (!sgt)
return;
- dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
- DMA_ATTR_SKIP_CPU_SYNC);
+ dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
sg_free_table(sgt);
kfree(sgt);
}
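
The hunks above replace the open-coded dma_map_sg_attrs()/dma_unmap_sg_attrs() calls with the sg_table-aware wrappers. dma_map_sgtable() records the mapped entry count in the sg_table itself and returns 0 or a negative errno, so the caller can propagate the real error instead of a guessed -ENOMEM. A minimal sketch of the same idiom in a driver's own code, assuming a driver-owned device and an already-populated sg_table (example_map() is illustrative, not a kernel API):

#include <linux/dma-mapping.h>

/* Map a driver-built sg_table for device DMA using the new helpers.
 * dma_map_sgtable() updates sgt->nents internally and returns 0 or a
 * negative errno; dma_unmap_sgtable() mirrors it on teardown.
 */
static int example_map(struct device *dev, struct sg_table *sgt,
		       enum dma_data_direction dir)
{
	int ret;

	ret = dma_map_sgtable(dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (ret)
		return ret;	/* the real error, not a guessed -ENOMEM */

	/* ... program the device with the mapped addresses ... */

	dma_unmap_sgtable(dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}
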
@@ -802,9 +803,11 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
*
* This is useful for implementing &drm_gem_object_funcs.get_sg_table.
*/
-struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
+struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
+ struct page **pages, unsigned int nr_pages)
{
struct sg_table *sg = NULL;
+ size_t max_segment = 0;
int ret;
sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
@@ -813,8 +816,13 @@ struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_page
goto out;
}
- ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
- nr_pages << PAGE_SHIFT, GFP_KERNEL);
+ if (dev)
+ max_segment = dma_max_mapping_size(dev->dev);
+ if (max_segment == 0 || max_segment > SCATTERLIST_MAX_SEGMENT)
+ max_segment = SCATTERLIST_MAX_SEGMENT;
+ ret = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
+ nr_pages << PAGE_SHIFT,
+ max_segment, GFP_KERNEL);
if (ret)
goto out;
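
With the new first parameter, drm_prime_pages_to_sg() clamps each segment to dma_max_mapping_size() of the exporting device's DMA device, falling back to SCATTERLIST_MAX_SEGMENT when no device is given or the reported limit exceeds it. A hedged sketch of an updated caller (example_get_sg() and its pinned page array are assumptions, not part of this patch):

#include <drm/drm_prime.h>

/* Exporters gain a struct drm_device argument; passing NULL keeps
 * the old behaviour of capping segments at SCATTERLIST_MAX_SEGMENT.
 */
static struct sg_table *example_get_sg(struct drm_device *dev,
				       struct page **pages,
				       unsigned int npages)
{
	return drm_prime_pages_to_sg(dev, pages, npages);
}
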
@@ -826,6 +834,37 @@ out:
EXPORT_SYMBOL(drm_prime_pages_to_sg);
/**
+ * drm_prime_get_contiguous_size - returns the contiguous size of the buffer
+ * @sgt: sg_table describing the buffer to check
+ *
+ * This helper calculates the contiguous size in the DMA address space
+ * of the buffer described by the provided sg_table.
+ *
+ * This is useful for implementing
+ * &drm_gem_object_funcs.gem_prime_import_sg_table.
+ */
+unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt)
+{
+ dma_addr_t expected = sg_dma_address(sgt->sgl);
+ struct scatterlist *sg;
+ unsigned long size = 0;
+ int i;
+
+ for_each_sgtable_dma_sg(sgt, sg, i) {
+ unsigned int len = sg_dma_len(sg);
+
+ if (!len)
+ break;
+ if (sg_dma_address(sg) != expected)
+ break;
+ expected += len;
+ size += len;
+ }
+ return size;
+}
+EXPORT_SYMBOL(drm_prime_get_contiguous_size);
+
+/**
* drm_gem_prime_export - helper library implementation of the export callback
* @obj: GEM object to export
* @flags: flags like DRM_CLOEXEC and DRM_RDWR
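
Because drm_prime_get_contiguous_size(), added above, walks only the DMA side of the table, an IOMMU-merged mapping counts as contiguous even when the backing pages are scattered. A sketch of the import-side check the helper is meant for (the surrounding function is hypothetical):

/* A driver whose DMA engine has no scatter-gather support rejects
 * imports whose mapping is not contiguous for the whole buffer.
 */
static int example_check_import(struct dma_buf_attachment *attach,
				struct sg_table *sgt)
{
	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
		return -EINVAL;
	return 0;
}
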
@@ -959,45 +998,26 @@ EXPORT_SYMBOL(drm_gem_prime_import);
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
dma_addr_t *addrs, int max_entries)
{
- unsigned count;
- struct scatterlist *sg;
- struct page *page;
- u32 page_len, page_index;
- dma_addr_t addr;
- u32 dma_len, dma_index;
-
- /*
- * Scatterlist elements contains both pages and DMA addresses, but
- * one shoud not assume 1:1 relation between them. The sg->length is
- * the size of the physical memory chunk described by the sg->page,
- * while sg_dma_len(sg) is the size of the DMA (IO virtual) chunk
- * described by the sg_dma_address(sg).
- */
- page_index = 0;
- dma_index = 0;
- for_each_sg(sgt->sgl, sg, sgt->nents, count) {
- page_len = sg->length;
- page = sg_page(sg);
- dma_len = sg_dma_len(sg);
- addr = sg_dma_address(sg);
-
- while (pages && page_len > 0) {
- if (WARN_ON(page_index >= max_entries))
+ struct sg_dma_page_iter dma_iter;
+ struct sg_page_iter page_iter;
+ struct page **p = pages;
+ dma_addr_t *a = addrs;
+
+ if (pages) {
+ for_each_sgtable_page(sgt, &page_iter, 0) {
+ if (WARN_ON(p - pages >= max_entries))
return -1;
- pages[page_index] = page;
- page++;
- page_len -= PAGE_SIZE;
- page_index++;
+ *p++ = sg_page_iter_page(&page_iter);
}
- while (addrs && dma_len > 0) {
- if (WARN_ON(dma_index >= max_entries))
+ }
+ if (addrs) {
+ for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
+ if (WARN_ON(a - addrs >= max_entries))
return -1;
- addrs[dma_index] = addr;
- addr += PAGE_SIZE;
- dma_len -= PAGE_SIZE;
- dma_index++;
+ *a++ = sg_page_iter_dma_address(&dma_iter);
}
}
+
return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
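
Finally, a sketch of an importer driving the rewritten drm_prime_sg_to_page_addr_arrays(); the temporary arrays and the npages bookkeeping are illustrative only. Note the helper still returns -1 rather than an errno when either array would overflow max_entries:

#include <linux/mm.h>
#include <drm/drm_prime.h>

/* Fill per-page arrays from an imported sg_table. Either array may
 * be NULL when only the other is needed; npages bounds both, counted
 * in PAGE_SIZE steps on both the page and the DMA side.
 */
static int example_fill_arrays(struct sg_table *sgt, unsigned int npages)
{
	struct page **pages;
	dma_addr_t *addrs;
	int ret = -ENOMEM;

	pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
	addrs = kvmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
	if (!pages || !addrs)
		goto out;

	ret = drm_prime_sg_to_page_addr_arrays(sgt, pages, addrs, npages);
	if (!ret) {
		/* ... consume pages[] and addrs[] here ... */
	}
out:
	kvfree(addrs);
	kvfree(pages);
	return ret;
}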