path: root/tools/testing/scatterlist/main.c
author    Maor Gottlieb <maorg@nvidia.com>      2021-08-24 17:25:30 +0300
committer Jason Gunthorpe <jgg@nvidia.com>      2021-08-24 19:52:40 -0300
commit    3e302dbc6774a27edaea39a1d5107f0c12e35cf2 (patch)
tree      082ed97d3a748ff89836ba6a2d7954a16fb029c8 /tools/testing/scatterlist/main.c
parent    lib/scatterlist: Provide a dedicated function to support table append (diff)
lib/scatterlist: Fix wrong update of orig_nents
orig_nents should represent the number of entries with pages, but __sg_alloc_table_from_pages sets orig_nents to the total number of entries in the table. This is wrong when the API is used for dynamic allocation, where not all of the table entries are mapped with pages. It wasn't observed until now, since the RDMA umem code, which uses this API in its dynamic form, doesn't use orig_nents, either implicitly or explicitly, through the scatterlist APIs.

Fix it by changing the append API to track the SG append table state and by adding an API to free the append table according to the total number of entries in the table. Now all APIs set orig_nents to the number of entries with pages.

Fixes: 07da1223ec93 ("lib/scatterlist: Add support in dynamic allocation of SG table from pages")
Link: https://lore.kernel.org/r/20210824142531.3877007-3-maorg@nvidia.com
Signed-off-by: Maor Gottlieb <maorg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
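For context, a minimal caller-side sketch of the reworked flow is shown below. It is hypothetical and not part of the patch: the helper name, page arrays, and sizes are illustrative, while the sg_alloc_append_table_from_pages() and sg_free_append_table() calls follow the signatures exercised by the updated test underneath. The caller keeps a zero-initialized struct sg_append_table, passes left_pages on every batch except the last, and releases everything with sg_free_append_table(), which frees by the total number of allocated entries rather than orig_nents.

/*
 * Hypothetical example, not taken from the patch: builds an SG table from
 * two batches of pages using the append API introduced in this series.
 */
#include <linux/scatterlist.h>
#include <linux/mm.h>

static int example_build_appended_table(struct page **first, unsigned int n_first,
					struct page **second, unsigned int n_second,
					unsigned int max_segment)
{
	struct sg_append_table append = {};	/* table plus append state */
	int ret;

	/* First batch; n_second pages are still expected to follow. */
	ret = sg_alloc_append_table_from_pages(&append, first, n_first, 0,
					       (unsigned long)n_first * PAGE_SIZE,
					       max_segment, n_second, GFP_KERNEL);
	if (ret)
		return ret;

	/* Final batch; left_pages == 0 marks the table as complete. */
	ret = sg_alloc_append_table_from_pages(&append, second, n_second, 0,
					       (unsigned long)n_second * PAGE_SIZE,
					       max_segment, 0, GFP_KERNEL);
	if (ret)
		goto out_free;

	/*
	 * append.sgt.nents and orig_nents now count only entries backed by
	 * pages; the append state tracks the true allocation size.
	 */
	/* ... map and use append.sgt here ... */

out_free:
	/* Frees by the total number of allocated entries, not orig_nents. */
	sg_free_append_table(&append);
	return ret;
}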
Diffstat (limited to 'tools/testing/scatterlist/main.c')
-rw-r--r--   tools/testing/scatterlist/main.c   45
1 file changed, 24 insertions(+), 21 deletions(-)
diff --git a/tools/testing/scatterlist/main.c b/tools/testing/scatterlist/main.c
index c2ff9179c2cc..08465a701cd5 100644
--- a/tools/testing/scatterlist/main.c
+++ b/tools/testing/scatterlist/main.c
@@ -85,43 +85,46 @@ int main(void)
for (i = 0, test = tests; test->expected_segments; test++, i++) {
int left_pages = test->pfn_app ? test->num_pages : 0;
+ struct sg_append_table append = {};
struct page *pages[MAX_PAGES];
- struct sg_table st;
- struct scatterlist *sg = NULL;
int ret;
set_pages(pages, test->pfn, test->num_pages);
- if (test->pfn_app) {
- sg = sg_alloc_append_table_from_pages(
- &st, pages, test->num_pages, 0, test->size,
- test->max_seg, NULL, left_pages, GFP_KERNEL);
- assert(PTR_ERR_OR_ZERO(sg) == test->alloc_ret);
- } else {
+ if (test->pfn_app)
+ ret = sg_alloc_append_table_from_pages(
+ &append, pages, test->num_pages, 0, test->size,
+ test->max_seg, left_pages, GFP_KERNEL);
+ else
ret = sg_alloc_table_from_pages_segment(
- &st, pages, test->num_pages, 0, test->size,
- test->max_seg, GFP_KERNEL);
- assert(ret == test->alloc_ret);
- }
+ &append.sgt, pages, test->num_pages, 0,
+ test->size, test->max_seg, GFP_KERNEL);
+
+ assert(ret == test->alloc_ret);
if (test->alloc_ret)
continue;
if (test->pfn_app) {
set_pages(pages, test->pfn_app, test->num_pages);
- sg = sg_alloc_append_table_from_pages(
- &st, pages, test->num_pages, 0, test->size,
- test->max_seg, sg, 0, GFP_KERNEL);
+ ret = sg_alloc_append_table_from_pages(
+ &append, pages, test->num_pages, 0, test->size,
+ test->max_seg, 0, GFP_KERNEL);
- assert(PTR_ERR_OR_ZERO(sg) == test->alloc_ret);
+ assert(ret == test->alloc_ret);
}
- VALIDATE(st.nents == test->expected_segments, &st, test);
+ VALIDATE(append.sgt.nents == test->expected_segments,
+ &append.sgt, test);
if (!test->pfn_app)
- VALIDATE(st.orig_nents == test->expected_segments, &st,
- test);
-
- sg_free_table(&st);
+ VALIDATE(append.sgt.orig_nents ==
+ test->expected_segments,
+ &append.sgt, test);
+
+ if (test->pfn_app)
+ sg_free_append_table(&append);
+ else
+ sg_free_table(&append.sgt);
}
assert(i == (sizeof(tests) / sizeof(tests[0])) - 1);