author     2024-12-17 00:04:34 +0900
committer  2025-01-25 20:22:33 -0800
commit     4610d35c14fc861d1795f8396d8c245ee43f3c37 (patch)
tree       3358c420de5e2ec7d07fffae9d0898b7058debf0
parent     mm/zsmalloc: use zpdesc in trylock_zspage()/lock_zspage() (diff)
mm/zsmalloc: convert __zs_map_object/__zs_unmap_object to use zpdesc
These two functions take a pointer to an array of struct page. Make
__zs_{map,unmap}_object() take a pointer to an array of zpdesc instead of
page.

Add silly type casting when calling them; the casts will be removed later
(a standalone sketch of why the cast is tolerable follows the sign-off tags
below).
Link: https://lkml.kernel.org/r/20241216150450.1228021-4-42.hyeyoo@gmail.com
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Alex Shi <alexs@kernel.org>
Acked-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Tested-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
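For readers new to the zpdesc conversion, here is a minimal, userspace-compilable sketch of the idea. struct page and struct zpdesc below are simplified stand-ins, map_object() is a hypothetical helper shaped like __zs_map_object() rather than a copy of it, and zpdesc_page() only models the real accessor from mm/zpdesc.h. The assumption that makes the temporary (struct zpdesc **) cast at the call sites tolerable is that struct zpdesc overlays struct page, so one pointer type can stand in for the other until the callers themselves are converted.

#include <stdio.h>
#include <string.h>

struct page {                     /* stand-in for the kernel's struct page */
        char data[64];            /* pretend page contents */
};

struct zpdesc {                   /* assumed to overlay struct page */
        struct page page;
};

/* modeled on the kernel's zpdesc_page(): recover the underlying page */
static struct page *zpdesc_page(struct zpdesc *zpdesc)
{
        return &zpdesc->page;
}

/*
 * Shape of the converted helper: it takes zpdescs, but still reaches the
 * page (via zpdesc_page()) to copy an object that straddles two pages.
 */
static void map_object(char *buf, struct zpdesc *zpdescs[2], int off, int size)
{
        int first = (int)sizeof(struct page) - off;   /* bytes left in page 0 */

        memcpy(buf, zpdesc_page(zpdescs[0])->data + off, first);
        memcpy(buf + first, zpdesc_page(zpdescs[1])->data, size - first);
}

int main(void)
{
        struct page p0, p1;
        struct page *pages[2] = { &p0, &p1 };
        char buf[16];

        memset(p0.data, 'A', sizeof(p0.data));
        memset(p1.data, 'B', sizeof(p1.data));

        /* the "silly type casting" the commit message mentions */
        map_object(buf, (struct zpdesc **)pages, 60, 8);

        printf("%.8s\n", buf);    /* prints AAAABBBB */
        return 0;
}

Once later patches in the series make the callers store zpdesc pointers directly, the cast goes away, which is presumably what "Casting will be removed later" refers to.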
 mm/zsmalloc.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 51f4a9b78023..c038caaef3a8 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1049,7 +1049,7 @@ static inline void __zs_cpu_down(struct mapping_area *area)
 }
 
 static void *__zs_map_object(struct mapping_area *area,
-                        struct page *pages[2], int off, int size)
+                        struct zpdesc *zpdescs[2], int off, int size)
 {
         size_t sizes[2];
         char *buf = area->vm_buf;
@@ -1065,14 +1065,14 @@ static void *__zs_map_object(struct mapping_area *area,
         sizes[1] = size - sizes[0];
 
         /* copy object to per-cpu buffer */
-        memcpy_from_page(buf, pages[0], off, sizes[0]);
-        memcpy_from_page(buf + sizes[0], pages[1], 0, sizes[1]);
+        memcpy_from_page(buf, zpdesc_page(zpdescs[0]), off, sizes[0]);
+        memcpy_from_page(buf + sizes[0], zpdesc_page(zpdescs[1]), 0, sizes[1]);
 out:
         return area->vm_buf;
 }
 
 static void __zs_unmap_object(struct mapping_area *area,
-                        struct page *pages[2], int off, int size)
+                        struct zpdesc *zpdescs[2], int off, int size)
 {
         size_t sizes[2];
         char *buf;
@@ -1090,8 +1090,8 @@ static void __zs_unmap_object(struct mapping_area *area,
         sizes[1] = size - sizes[0];
 
         /* copy per-cpu buffer to object */
-        memcpy_to_page(pages[0], off, buf, sizes[0]);
-        memcpy_to_page(pages[1], 0, buf + sizes[0], sizes[1]);
+        memcpy_to_page(zpdesc_page(zpdescs[0]), off, buf, sizes[0]);
+        memcpy_to_page(zpdesc_page(zpdescs[1]), 0, buf + sizes[0], sizes[1]);
 out:
         /* enable page faults to match kunmap_local() return conditions */
@@ -1230,7 +1230,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
         pages[1] = get_next_page(page);
         BUG_ON(!pages[1]);
 
-        ret = __zs_map_object(area, pages, off, class->size);
+        ret = __zs_map_object(area, (struct zpdesc **)pages, off, class->size);
 out:
         if (likely(!ZsHugePage(zspage)))
                 ret += ZS_HANDLE_SIZE;
@@ -1265,7 +1265,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
                 pages[1] = get_next_page(page);
                 BUG_ON(!pages[1]);
 
-                __zs_unmap_object(area, pages, off, class->size);
+                __zs_unmap_object(area, (struct zpdesc **)pages, off, class->size);
         }
         local_unlock(&zs_map_area.lock);