author    zhong jiang <zhongjiang@huawei.com>    2019-11-30 17:49:50 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2019-12-01 06:29:18 -0800
commit    b96cc65515bb16f90a361c01da088ce09ad3cf92 (patch)
tree      773593dd0ac9bc7c75bfe25e0f44f3cdab9470c4 /mm/gup.c
parent    mm/filemap.c: warn if stale pagecache is left after direct write (diff)
mm/gup.c: allow CMA migration to propagate errors back to caller
check_and_migrate_cma_pages() was recording the result of
__get_user_pages_locked() in an unsigned "nr_pages" variable. Because
__get_user_pages_locked() returns a signed value that can include
negative errno values, this had the effect of hiding errors.

Change check_and_migrate_cma_pages() implementation so that it uses a
signed variable instead, and propagates the results back to the caller
just as other gup internal functions do.

This was discovered with the help of unsigned_lesser_than_zero.cocci.

Link: http://lkml.kernel.org/r/1571671030-58029-1-git-send-email-zhongjiang@huawei.com
Signed-off-by: zhong jiang <zhongjiang@huawei.com>
Suggested-by: John Hubbard <jhubbard@nvidia.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
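[Editor's illustration, not part of the commit: a minimal userspace sketch of
the bug class this patch fixes, where a negative errno stored in an unsigned
variable wraps to a huge positive count, so an error check can never fire.
fake_gup_locked() is a hypothetical stand-in for __get_user_pages_locked().]

#include <stdio.h>

/* Hypothetical stand-in for __get_user_pages_locked(): returns the
 * number of pages pinned on success, or a negative errno on failure. */
static long fake_gup_locked(int fail)
{
	return fail ? -12 /* -ENOMEM */ : 8;
}

int main(void)
{
	unsigned long nr_pages;	/* the buggy pattern: unsigned result */
	long ret;		/* the fixed pattern: signed result */

	nr_pages = fake_gup_locked(1);
	ret = fake_gup_locked(1);

	/* -12 wraps to 18446744073709551604 in the unsigned copy, so a
	 * "nr_pages < 0" test is always false and the failure
	 * masquerades as a successful pin of a huge number of pages. */
	printf("unsigned: %lu (error hidden)\n", nr_pages);

	/* The signed copy preserves the errno and can be checked and
	 * propagated back to the caller. */
	if (ret < 0)
		printf("signed:   %ld (error propagated)\n", ret);

	return 0;
}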
Diffstat (limited to 'mm/gup.c')
-rw-r--r--	mm/gup.c	8
1 file changed, 5 insertions, 3 deletions
diff --git a/mm/gup.c b/mm/gup.c
index 8f236a335ae9..c2b3e117d706 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1443,6 +1443,7 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk,
 	bool drain_allow = true;
 	bool migrate_allow = true;
 	LIST_HEAD(cma_page_list);
+	long ret = nr_pages;
 
 check_again:
 	for (i = 0; i < nr_pages;) {
@@ -1504,17 +1505,18 @@ check_again:
 		 * again migrating any new CMA pages which we failed to isolate
 		 * earlier.
 		 */
-		nr_pages = __get_user_pages_locked(tsk, mm, start, nr_pages,
+		ret = __get_user_pages_locked(tsk, mm, start, nr_pages,
 						   pages, vmas, NULL,
 						   gup_flags);
 
-		if ((nr_pages > 0) && migrate_allow) {
+		if ((ret > 0) && migrate_allow) {
+			nr_pages = ret;
 			drain_allow = true;
 			goto check_again;
 		}
 	}
 
-	return nr_pages;
+	return ret;
 }
 #else
 static long check_and_migrate_cma_pages(struct task_struct *tsk,