author    Maor Gottlieb <maorg@nvidia.com>    2021-08-24 17:25:29 +0300
committer Jason Gunthorpe <jgg@nvidia.com>    2021-08-24 15:21:14 -0300
commit    90e7a6de62781c27d6a111fccfb19b807f9b6887 (patch)
tree      3d6375338bcc2032b928772f59bbbd3b410389f3 /drivers/gpu
parent    Linux 5.14-rc6 (diff)
lib/scatterlist: Provide a dedicated function to support table append
RDMA is the only in-kernel user of __sg_alloc_table_from_pages() that appends pages dynamically. In the next patch that mode will be extended and the function will gain more parameters, so split it out into a dedicated function to make that change clearer.

Link: https://lore.kernel.org/r/20210824142531.3877007-2-maorg@nvidia.com
Signed-off-by: Maor Gottlieb <maorg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
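For reference, a minimal sketch of what a non-appending caller looks like after the conversion. The surrounding helper and its name are hypothetical; the sg_alloc_table_from_pages_segment() call itself mirrors the drm_prime.c hunk below:

	#include <linux/err.h>
	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	/* Hypothetical helper: build an sg_table covering a page array. */
	static struct sg_table *example_pages_to_sgt(struct page **pages,
						     unsigned int nr_pages,
						     unsigned int max_segment)
	{
		struct sg_table *sgt;
		int err;

		sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
		if (!sgt)
			return ERR_PTR(-ENOMEM);

		/*
		 * The new wrapper returns 0 or a negative errno rather than a
		 * scatterlist pointer, so no IS_ERR()/ERR_CAST() handling is
		 * needed at the call site.
		 */
		err = sg_alloc_table_from_pages_segment(sgt, pages, nr_pages, 0,
							(unsigned long)nr_pages << PAGE_SHIFT,
							max_segment, GFP_KERNEL);
		if (err) {
			kfree(sgt);
			return ERR_PTR(err);
		}
		return sgt;
	}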
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/drm_prime.c                  13
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_userptr.c  11
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c   14
3 files changed, 15 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 2a54f86856af..cf3278041f9c 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -807,8 +807,8 @@ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
struct page **pages, unsigned int nr_pages)
{
struct sg_table *sg;
- struct scatterlist *sge;
size_t max_segment = 0;
+ int err;
sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!sg)
@@ -818,13 +818,12 @@ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
max_segment = dma_max_mapping_size(dev->dev);
if (max_segment == 0)
max_segment = UINT_MAX;
- sge = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
- nr_pages << PAGE_SHIFT,
- max_segment,
- NULL, 0, GFP_KERNEL);
- if (IS_ERR(sge)) {
+ err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0,
+ nr_pages << PAGE_SHIFT,
+ max_segment, GFP_KERNEL);
+ if (err) {
kfree(sg);
- sg = ERR_CAST(sge);
+ sg = ERR_PTR(err);
}
return sg;
}
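The hunk above captures the conversion pattern repeated in the two remaining files: __sg_alloc_table_from_pages() returned the last scatterlist (or an ERR_PTR) so that an appending caller could continue from it, while the new wrapper returns a plain errno. Condensed side by side, using the identifiers from this hunk (the actual driver code also frees sg on failure):

	/* Before: failure carried in a struct scatterlist pointer. */
	sge = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
					  nr_pages << PAGE_SHIFT, max_segment,
					  NULL, 0, GFP_KERNEL);
	if (IS_ERR(sge))
		return ERR_CAST(sge);

	/* After: plain int return; the scatterlist cursor is gone. */
	err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0,
						nr_pages << PAGE_SHIFT,
						max_segment, GFP_KERNEL);
	if (err)
		return ERR_PTR(err);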
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 7487bab11f0b..458f797a9e1e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -133,7 +133,6 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
unsigned int max_segment = i915_sg_segment_size();
struct sg_table *st;
unsigned int sg_page_sizes;
- struct scatterlist *sg;
struct page **pvec;
int ret;
@@ -153,13 +152,11 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
spin_unlock(&i915->mm.notifier_lock);
alloc_table:
- sg = __sg_alloc_table_from_pages(st, pvec, num_pages, 0,
- num_pages << PAGE_SHIFT, max_segment,
- NULL, 0, GFP_KERNEL);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
+ ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0,
+ num_pages << PAGE_SHIFT,
+ max_segment, GFP_KERNEL);
+ if (ret)
goto err;
- }
ret = i915_gem_gtt_prepare_pages(obj, st);
if (ret) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 0488042fb287..fc372d2e52a1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -363,7 +363,6 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
int ret = 0;
static size_t sgl_size;
static size_t sgt_size;
- struct scatterlist *sg;
if (vmw_tt->mapped)
return 0;
@@ -386,15 +385,12 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
if (unlikely(ret != 0))
return ret;
- sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
- vsgt->num_pages, 0,
- (unsigned long) vsgt->num_pages << PAGE_SHIFT,
- dma_get_max_seg_size(dev_priv->drm.dev),
- NULL, 0, GFP_KERNEL);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
+ ret = sg_alloc_table_from_pages_segment(
+ &vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
+ (unsigned long)vsgt->num_pages << PAGE_SHIFT,
+ dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
+ if (ret)
goto out_sg_alloc_fail;
- }
if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
uint64_t over_alloc =