From 44835d20b2a0c9b4c0c3fb96e90f4e2fd4a4e41d Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 25 Feb 2021 17:15:36 -0800 Subject: mm: add FGP_ENTRY The functionality of find_lock_entry() and find_get_entry() can be provided by pagecache_get_page(), which lets us delete find_lock_entry() and make find_get_entry() static. Link: https://lkml.kernel.org/r/20201112212641.27837-5-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Christoph Hellwig Cc: Dave Chinner Cc: Hugh Dickins Cc: Jan Kara Cc: Johannes Weiner Cc: Kirill A. Shutemov Cc: William Kucharski Cc: Yang Shi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/pagemap.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index bd629d676a27..b379b2388202 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -315,6 +315,7 @@ pgoff_t page_cache_prev_miss(struct address_space *mapping, #define FGP_NOWAIT 0x00000020 #define FGP_FOR_MMAP 0x00000040 #define FGP_HEAD 0x00000080 +#define FGP_ENTRY 0x00000100 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, int fgp_flags, gfp_t cache_gfp_mask); -- cgit v1.2.3-59-g8ed1b From 41139aa4c3a31ee7e072fc63353c74035aade2ff Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 25 Feb 2021 17:15:48 -0800 Subject: mm/filemap: add mapping_seek_hole_data Rewrite shmem_seek_hole_data() and move it to filemap.c. [willy@infradead.org: don't put an xa_is_value() page] Link: https://lkml.kernel.org/r/20201124041507.28996-4-willy@infradead.org Link: https://lkml.kernel.org/r/20201112212641.27837-8-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: William Kucharski Reviewed-by: Christoph Hellwig Cc: Dave Chinner Cc: Hugh Dickins Cc: Jan Kara Cc: Johannes Weiner Cc: Kirill A. Shutemov Cc: Yang Shi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/pagemap.h | 2 ++ mm/filemap.c | 76 +++++++++++++++++++++++++++++++++++++++++++++++++ mm/shmem.c | 74 +++-------------------------------------------- 3 files changed, 82 insertions(+), 70 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index b379b2388202..3608993428d9 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -760,6 +760,8 @@ extern void __delete_from_page_cache(struct page *page, void *shadow); void replace_page_cache_page(struct page *old, struct page *new); void delete_from_page_cache_batch(struct address_space *mapping, struct pagevec *pvec); +loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end, + int whence); /* * Like add_to_page_cache_locked, but used to add newly allocated pages: diff --git a/mm/filemap.c b/mm/filemap.c index 21443850aeae..eff3006be12a 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2553,6 +2553,82 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) } EXPORT_SYMBOL(generic_file_read_iter); +static inline bool page_seek_match(struct page *page, bool seek_data) +{ + if (xa_is_value(page) || PageUptodate(page)) + return seek_data; + return !seek_data; +} + +static inline +unsigned int seek_page_size(struct xa_state *xas, struct page *page) +{ + if (xa_is_value(page)) + return PAGE_SIZE << xa_get_order(xas->xa, xas->xa_index); + return thp_size(page); +} + +/** + * mapping_seek_hole_data - Seek for SEEK_DATA / SEEK_HOLE in the page cache. + * @mapping: Address space to search. 
+ * @start: First byte to consider. + * @end: Limit of search (exclusive). + * @whence: Either SEEK_HOLE or SEEK_DATA. + * + * If the page cache knows which blocks contain holes and which blocks + * contain data, your filesystem can use this function to implement + * SEEK_HOLE and SEEK_DATA. This is useful for filesystems which are + * entirely memory-based such as tmpfs, and filesystems which support + * unwritten extents. + * + * Return: The requested offset on successs, or -ENXIO if @whence specifies + * SEEK_DATA and there is no data after @start. There is an implicit hole + * after @end - 1, so SEEK_HOLE returns @end if all the bytes between @start + * and @end contain data. + */ +loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start, + loff_t end, int whence) +{ + XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT); + pgoff_t max = (end - 1) / PAGE_SIZE; + bool seek_data = (whence == SEEK_DATA); + struct page *page; + + if (end <= start) + return -ENXIO; + + rcu_read_lock(); + while ((page = find_get_entry(&xas, max, XA_PRESENT))) { + loff_t pos = xas.xa_index * PAGE_SIZE; + + if (start < pos) { + if (!seek_data) + goto unlock; + start = pos; + } + + if (page_seek_match(page, seek_data)) + goto unlock; + start = pos + seek_page_size(&xas, page); + if (!xa_is_value(page)) + put_page(page); + } + rcu_read_unlock(); + + if (seek_data) + return -ENXIO; + goto out; + +unlock: + rcu_read_unlock(); + if (!xa_is_value(page)) + put_page(page); +out: + if (start > end) + return end; + return start; +} + #ifdef CONFIG_MMU #define MMAP_LOTSAMISS (100) /* diff --git a/mm/shmem.c b/mm/shmem.c index bd5bb78128af..deb22e128435 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2668,86 +2668,20 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) return retval ? retval : error; } -/* - * llseek SEEK_DATA or SEEK_HOLE through the page cache. 
- */ -static pgoff_t shmem_seek_hole_data(struct address_space *mapping, - pgoff_t index, pgoff_t end, int whence) -{ - struct page *page; - struct pagevec pvec; - pgoff_t indices[PAGEVEC_SIZE]; - bool done = false; - int i; - - pagevec_init(&pvec); - pvec.nr = 1; /* start small: we may be there already */ - while (!done) { - pvec.nr = find_get_entries(mapping, index, - pvec.nr, pvec.pages, indices); - if (!pvec.nr) { - if (whence == SEEK_DATA) - index = end; - break; - } - for (i = 0; i < pvec.nr; i++, index++) { - if (index < indices[i]) { - if (whence == SEEK_HOLE) { - done = true; - break; - } - index = indices[i]; - } - page = pvec.pages[i]; - if (page && !xa_is_value(page)) { - if (!PageUptodate(page)) - page = NULL; - } - if (index >= end || - (page && whence == SEEK_DATA) || - (!page && whence == SEEK_HOLE)) { - done = true; - break; - } - } - pagevec_remove_exceptionals(&pvec); - pagevec_release(&pvec); - pvec.nr = PAGEVEC_SIZE; - cond_resched(); - } - return index; -} - static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence) { struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; - pgoff_t start, end; - loff_t new_offset; if (whence != SEEK_DATA && whence != SEEK_HOLE) return generic_file_llseek_size(file, offset, whence, MAX_LFS_FILESIZE, i_size_read(inode)); + if (offset < 0) + return -ENXIO; + inode_lock(inode); /* We're holding i_mutex so we can access i_size directly */ - - if (offset < 0 || offset >= inode->i_size) - offset = -ENXIO; - else { - start = offset >> PAGE_SHIFT; - end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; - new_offset = shmem_seek_hole_data(mapping, start, end, whence); - new_offset <<= PAGE_SHIFT; - if (new_offset > offset) { - if (new_offset < inode->i_size) - offset = new_offset; - else if (whence == SEEK_DATA) - offset = -ENXIO; - else - offset = inode->i_size; - } - } - + offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence); if (offset >= 0) offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE); inode_unlock(inode); -- cgit v1.2.3-59-g8ed1b From ca122fe40eb463c8c11c3bfc1914f0048ca5c268 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 25 Feb 2021 17:16:00 -0800 Subject: mm: add an 'end' parameter to find_get_entries This simplifies the callers and leads to a more efficient implementation since the XArray has this functionality already. Link: https://lkml.kernel.org/r/20201112212641.27837-11-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Jan Kara Reviewed-by: William Kucharski Reviewed-by: Christoph Hellwig Cc: Dave Chinner Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kirill A. 
Shutemov Cc: Yang Shi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/pagemap.h | 4 ++-- mm/filemap.c | 9 +++++---- mm/shmem.c | 10 ++-------- mm/swap.c | 2 +- 4 files changed, 10 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 3608993428d9..fdb2c4e44851 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -451,8 +451,8 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index) } unsigned find_get_entries(struct address_space *mapping, pgoff_t start, - unsigned int nr_entries, struct page **entries, - pgoff_t *indices); + pgoff_t end, unsigned int nr_entries, struct page **entries, + pgoff_t *indices); unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, pgoff_t end, unsigned int nr_pages, struct page **pages); diff --git a/mm/filemap.c b/mm/filemap.c index 61fdcdc75275..65cfdff17ac6 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1865,6 +1865,7 @@ reset: * find_get_entries - gang pagecache lookup * @mapping: The address_space to search * @start: The starting page cache index + * @end: The final page index (inclusive). * @nr_entries: The maximum number of entries * @entries: Where the resulting entries are placed * @indices: The cache indices corresponding to the entries in @entries @@ -1888,9 +1889,9 @@ reset: * * Return: the number of pages and shadow entries which were found. */ -unsigned find_get_entries(struct address_space *mapping, - pgoff_t start, unsigned int nr_entries, - struct page **entries, pgoff_t *indices) +unsigned find_get_entries(struct address_space *mapping, pgoff_t start, + pgoff_t end, unsigned int nr_entries, struct page **entries, + pgoff_t *indices) { XA_STATE(xas, &mapping->i_pages, start); struct page *page; @@ -1900,7 +1901,7 @@ unsigned find_get_entries(struct address_space *mapping, return 0; rcu_read_lock(); - while ((page = find_get_entry(&xas, ULONG_MAX, XA_PRESENT))) { + while ((page = find_get_entry(&xas, end, XA_PRESENT))) { /* * Terminate early on finding a THP, to allow the caller to * handle it all at once; but continue if this is hugetlbfs. 
diff --git a/mm/shmem.c b/mm/shmem.c index 86b1f5bc502c..4aac760aa2d4 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -913,8 +913,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, struct page *page = pvec.pages[i]; index = indices[i]; - if (index >= end) - break; if (xa_is_value(page)) { if (unfalloc) @@ -967,9 +965,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, while (index < end) { cond_resched(); - pvec.nr = find_get_entries(mapping, index, - min(end - index, (pgoff_t)PAGEVEC_SIZE), - pvec.pages, indices); + pvec.nr = find_get_entries(mapping, index, end - 1, + PAGEVEC_SIZE, pvec.pages, indices); if (!pvec.nr) { /* If all gone or hole-punch or unfalloc, we're done */ if (index == start || end != -1) @@ -982,9 +979,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, struct page *page = pvec.pages[i]; index = indices[i]; - if (index >= end) - break; - if (xa_is_value(page)) { if (unfalloc) continue; diff --git a/mm/swap.c b/mm/swap.c index ab3258afcbeb..c5773a84feab 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -1046,7 +1046,7 @@ unsigned pagevec_lookup_entries(struct pagevec *pvec, pgoff_t start, unsigned nr_entries, pgoff_t *indices) { - pvec->nr = find_get_entries(mapping, start, nr_entries, + pvec->nr = find_get_entries(mapping, start, ULONG_MAX, nr_entries, pvec->pages, indices); return pagevec_count(pvec); } -- cgit v1.2.3-59-g8ed1b From 31d270fd98d196578223e5b568a0bd3bc6028b09 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 25 Feb 2021 17:16:03 -0800 Subject: mm: add an 'end' parameter to pagevec_lookup_entries Simplifies the callers and uses the existing functionality in find_get_entries(). We can also drop the final argument of truncate_exceptional_pvec_entries() and simplify the logic in that function. Link: https://lkml.kernel.org/r/20201112212641.27837-12-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Jan Kara Reviewed-by: William Kucharski Reviewed-by: Christoph Hellwig Cc: Dave Chinner Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kirill A. Shutemov Cc: Yang Shi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/pagevec.h | 5 ++--- mm/swap.c | 8 ++++---- mm/truncate.c | 41 ++++++++++------------------------------- 3 files changed, 16 insertions(+), 38 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h index ad4ddc17d403..f70a9dc81504 100644 --- a/include/linux/pagevec.h +++ b/include/linux/pagevec.h @@ -26,9 +26,8 @@ struct pagevec { void __pagevec_release(struct pagevec *pvec); void __pagevec_lru_add(struct pagevec *pvec); unsigned pagevec_lookup_entries(struct pagevec *pvec, - struct address_space *mapping, - pgoff_t start, unsigned nr_entries, - pgoff_t *indices); + struct address_space *mapping, pgoff_t start, pgoff_t end, + unsigned nr_entries, pgoff_t *indices); void pagevec_remove_exceptionals(struct pagevec *pvec); unsigned pagevec_lookup_range(struct pagevec *pvec, struct address_space *mapping, diff --git a/mm/swap.c b/mm/swap.c index c5773a84feab..db8c354264a5 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -1022,6 +1022,7 @@ void __pagevec_lru_add(struct pagevec *pvec) * @pvec: Where the resulting entries are placed * @mapping: The address_space to search * @start: The starting entry index + * @end: The highest index to return (inclusive). 
* @nr_entries: The maximum number of pages * @indices: The cache indices corresponding to the entries in @pvec * @@ -1042,11 +1043,10 @@ void __pagevec_lru_add(struct pagevec *pvec) * found. */ unsigned pagevec_lookup_entries(struct pagevec *pvec, - struct address_space *mapping, - pgoff_t start, unsigned nr_entries, - pgoff_t *indices) + struct address_space *mapping, pgoff_t start, pgoff_t end, + unsigned nr_entries, pgoff_t *indices) { - pvec->nr = find_get_entries(mapping, start, ULONG_MAX, nr_entries, + pvec->nr = find_get_entries(mapping, start, end, nr_entries, pvec->pages, indices); return pagevec_count(pvec); } diff --git a/mm/truncate.c b/mm/truncate.c index de7f4f47f780..60df23890c2d 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -57,11 +57,10 @@ static void clear_shadow_entry(struct address_space *mapping, pgoff_t index, * exceptional entries similar to what pagevec_remove_exceptionals does. */ static void truncate_exceptional_pvec_entries(struct address_space *mapping, - struct pagevec *pvec, pgoff_t *indices, - pgoff_t end) + struct pagevec *pvec, pgoff_t *indices) { int i, j; - bool dax, lock; + bool dax; /* Handled by shmem itself */ if (shmem_mapping(mapping)) @@ -75,8 +74,7 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping, return; dax = dax_mapping(mapping); - lock = !dax && indices[j] < end; - if (lock) + if (!dax) xa_lock_irq(&mapping->i_pages); for (i = j; i < pagevec_count(pvec); i++) { @@ -88,9 +86,6 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping, continue; } - if (index >= end) - continue; - if (unlikely(dax)) { dax_delete_mapping_entry(mapping, index); continue; @@ -99,7 +94,7 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping, __clear_shadow_entry(mapping, index, page); } - if (lock) + if (!dax) xa_unlock_irq(&mapping->i_pages); pvec->nr = j; } @@ -329,7 +324,7 @@ void truncate_inode_pages_range(struct address_space *mapping, while (index < end && find_lock_entries(mapping, index, end - 1, &pvec, indices)) { index = indices[pagevec_count(&pvec) - 1] + 1; - truncate_exceptional_pvec_entries(mapping, &pvec, indices, end); + truncate_exceptional_pvec_entries(mapping, &pvec, indices); for (i = 0; i < pagevec_count(&pvec); i++) truncate_cleanup_page(mapping, pvec.pages[i]); delete_from_page_cache_batch(mapping, &pvec); @@ -381,8 +376,8 @@ void truncate_inode_pages_range(struct address_space *mapping, index = start; for ( ; ; ) { cond_resched(); - if (!pagevec_lookup_entries(&pvec, mapping, index, - min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) { + if (!pagevec_lookup_entries(&pvec, mapping, index, end - 1, + PAGEVEC_SIZE, indices)) { /* If all gone from start onwards, we're done */ if (index == start) break; @@ -390,23 +385,12 @@ void truncate_inode_pages_range(struct address_space *mapping, index = start; continue; } - if (index == start && indices[0] >= end) { - /* All gone out of hole to be punched, we're done */ - pagevec_remove_exceptionals(&pvec); - pagevec_release(&pvec); - break; - } for (i = 0; i < pagevec_count(&pvec); i++) { struct page *page = pvec.pages[i]; /* We rely upon deletion not changing page->index */ index = indices[i]; - if (index >= end) { - /* Restart punch to make sure all gone */ - index = start - 1; - break; - } if (xa_is_value(page)) continue; @@ -417,7 +401,7 @@ void truncate_inode_pages_range(struct address_space *mapping, truncate_inode_page(mapping, page); unlock_page(page); } - truncate_exceptional_pvec_entries(mapping, &pvec, indices, 
end); + truncate_exceptional_pvec_entries(mapping, &pvec, indices); pagevec_release(&pvec); index++; } @@ -513,8 +497,6 @@ static unsigned long __invalidate_mapping_pages(struct address_space *mapping, /* We rely upon deletion not changing page->index */ index = indices[i]; - if (index > end) - break; if (xa_is_value(page)) { invalidate_exceptional_entry(mapping, index, @@ -656,16 +638,13 @@ int invalidate_inode_pages2_range(struct address_space *mapping, pagevec_init(&pvec); index = start; - while (index <= end && pagevec_lookup_entries(&pvec, mapping, index, - min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1, - indices)) { + while (pagevec_lookup_entries(&pvec, mapping, index, end, + PAGEVEC_SIZE, indices)) { for (i = 0; i < pagevec_count(&pvec); i++) { struct page *page = pvec.pages[i]; /* We rely upon deletion not changing page->index */ index = indices[i]; - if (index > end) - break; if (xa_is_value(page)) { if (!invalidate_exceptional_entry2(mapping, -- cgit v1.2.3-59-g8ed1b From 38cefeb33749992ceaad6ea40e12f92aa8f8e28f Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 25 Feb 2021 17:16:07 -0800 Subject: mm: remove nr_entries parameter from pagevec_lookup_entries All callers want to fetch the full size of the pvec. Link: https://lkml.kernel.org/r/20201112212641.27837-13-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Jan Kara Reviewed-by: William Kucharski Reviewed-by: Christoph Hellwig Cc: Dave Chinner Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kirill A. Shutemov Cc: Yang Shi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/pagevec.h | 2 +- mm/swap.c | 4 ++-- mm/truncate.c | 5 ++--- 3 files changed, 5 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h index f70a9dc81504..72c5ea2e708d 100644 --- a/include/linux/pagevec.h +++ b/include/linux/pagevec.h @@ -27,7 +27,7 @@ void __pagevec_release(struct pagevec *pvec); void __pagevec_lru_add(struct pagevec *pvec); unsigned pagevec_lookup_entries(struct pagevec *pvec, struct address_space *mapping, pgoff_t start, pgoff_t end, - unsigned nr_entries, pgoff_t *indices); + pgoff_t *indices); void pagevec_remove_exceptionals(struct pagevec *pvec); unsigned pagevec_lookup_range(struct pagevec *pvec, struct address_space *mapping, diff --git a/mm/swap.c b/mm/swap.c index db8c354264a5..cd9e1ed7e78f 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -1044,9 +1044,9 @@ void __pagevec_lru_add(struct pagevec *pvec) */ unsigned pagevec_lookup_entries(struct pagevec *pvec, struct address_space *mapping, pgoff_t start, pgoff_t end, - unsigned nr_entries, pgoff_t *indices) + pgoff_t *indices) { - pvec->nr = find_get_entries(mapping, start, end, nr_entries, + pvec->nr = find_get_entries(mapping, start, end, PAGEVEC_SIZE, pvec->pages, indices); return pagevec_count(pvec); } diff --git a/mm/truncate.c b/mm/truncate.c index 60df23890c2d..41e7377ad58d 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -377,7 +377,7 @@ void truncate_inode_pages_range(struct address_space *mapping, for ( ; ; ) { cond_resched(); if (!pagevec_lookup_entries(&pvec, mapping, index, end - 1, - PAGEVEC_SIZE, indices)) { + indices)) { /* If all gone from start onwards, we're done */ if (index == start) break; @@ -638,8 +638,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping, pagevec_init(&pvec); index = start; - while (pagevec_lookup_entries(&pvec, mapping, index, end, - PAGEVEC_SIZE, indices)) { + while (pagevec_lookup_entries(&pvec, mapping, 
index, end, indices)) { for (i = 0; i < pagevec_count(&pvec); i++) { struct page *page = pvec.pages[i]; -- cgit v1.2.3-59-g8ed1b From cf2039af1a2eee58fdbfa68bc0c9123e77477645 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 25 Feb 2021 17:16:11 -0800 Subject: mm: pass pvec directly to find_get_entries All callers of find_get_entries() use a pvec, so pass it directly instead of manipulating it in the caller. Link: https://lkml.kernel.org/r/20201112212641.27837-14-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Jan Kara Reviewed-by: William Kucharski Cc: Christoph Hellwig Cc: Dave Chinner Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kirill A. Shutemov Cc: Yang Shi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/pagemap.h | 3 +-- mm/filemap.c | 21 +++++++++------------ mm/shmem.c | 5 ++--- mm/swap.c | 4 +--- 4 files changed, 13 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index fdb2c4e44851..20225b067583 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -451,8 +451,7 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index) } unsigned find_get_entries(struct address_space *mapping, pgoff_t start, - pgoff_t end, unsigned int nr_entries, struct page **entries, - pgoff_t *indices); + pgoff_t end, struct pagevec *pvec, pgoff_t *indices); unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, pgoff_t end, unsigned int nr_pages, struct page **pages); diff --git a/mm/filemap.c b/mm/filemap.c index 65cfdff17ac6..43700480d897 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1866,14 +1866,12 @@ reset: * @mapping: The address_space to search * @start: The starting page cache index * @end: The final page index (inclusive). - * @nr_entries: The maximum number of entries - * @entries: Where the resulting entries are placed + * @pvec: Where the resulting entries are placed. * @indices: The cache indices corresponding to the entries in @entries * - * find_get_entries() will search for and return a group of up to - * @nr_entries entries in the mapping. The entries are placed at - * @entries. find_get_entries() takes a reference against any actual - * pages it returns. + * find_get_entries() will search for and return a batch of entries in + * the mapping. The entries are placed in @pvec. find_get_entries() + * takes a reference on any actual pages it returns. * * The search returns a group of mapping-contiguous page cache entries * with ascending indexes. There may be holes in the indices due to @@ -1890,15 +1888,12 @@ reset: * Return: the number of pages and shadow entries which were found. 
*/ unsigned find_get_entries(struct address_space *mapping, pgoff_t start, - pgoff_t end, unsigned int nr_entries, struct page **entries, - pgoff_t *indices) + pgoff_t end, struct pagevec *pvec, pgoff_t *indices) { XA_STATE(xas, &mapping->i_pages, start); struct page *page; unsigned int ret = 0; - - if (!nr_entries) - return 0; + unsigned nr_entries = PAGEVEC_SIZE; rcu_read_lock(); while ((page = find_get_entry(&xas, end, XA_PRESENT))) { @@ -1913,11 +1908,13 @@ unsigned find_get_entries(struct address_space *mapping, pgoff_t start, } indices[ret] = xas.xa_index; - entries[ret] = page; + pvec->pages[ret] = page; if (++ret == nr_entries) break; } rcu_read_unlock(); + + pvec->nr = ret; return ret; } diff --git a/mm/shmem.c b/mm/shmem.c index 4aac760aa2d4..ee8f21832f98 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -965,9 +965,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, while (index < end) { cond_resched(); - pvec.nr = find_get_entries(mapping, index, end - 1, - PAGEVEC_SIZE, pvec.pages, indices); - if (!pvec.nr) { + if (!find_get_entries(mapping, index, end - 1, &pvec, + indices)) { /* If all gone or hole-punch or unfalloc, we're done */ if (index == start || end != -1) break; diff --git a/mm/swap.c b/mm/swap.c index cd9e1ed7e78f..d20a746a831e 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -1046,9 +1046,7 @@ unsigned pagevec_lookup_entries(struct pagevec *pvec, struct address_space *mapping, pgoff_t start, pgoff_t end, pgoff_t *indices) { - pvec->nr = find_get_entries(mapping, start, end, PAGEVEC_SIZE, - pvec->pages, indices); - return pagevec_count(pvec); + return find_get_entries(mapping, start, end, pvec, indices); } /** -- cgit v1.2.3-59-g8ed1b From a656a20241f08be532539c7d5bd82df741c2d487 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 25 Feb 2021 17:16:14 -0800 Subject: mm: remove pagevec_lookup_entries pagevec_lookup_entries() is now just a wrapper around find_get_entries() so remove it and convert all its callers. Link: https://lkml.kernel.org/r/20201112212641.27837-15-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Jan Kara Reviewed-by: William Kucharski Reviewed-by: Christoph Hellwig Cc: Dave Chinner Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kirill A. Shutemov Cc: Yang Shi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/pagevec.h | 3 --- mm/swap.c | 36 ++---------------------------------- mm/truncate.c | 4 ++-- 3 files changed, 4 insertions(+), 39 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h index 72c5ea2e708d..7f3f19065a9f 100644 --- a/include/linux/pagevec.h +++ b/include/linux/pagevec.h @@ -25,9 +25,6 @@ struct pagevec { void __pagevec_release(struct pagevec *pvec); void __pagevec_lru_add(struct pagevec *pvec); -unsigned pagevec_lookup_entries(struct pagevec *pvec, - struct address_space *mapping, pgoff_t start, pgoff_t end, - pgoff_t *indices); void pagevec_remove_exceptionals(struct pagevec *pvec); unsigned pagevec_lookup_range(struct pagevec *pvec, struct address_space *mapping, diff --git a/mm/swap.c b/mm/swap.c index d20a746a831e..31b844d4ed94 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -1017,44 +1017,12 @@ void __pagevec_lru_add(struct pagevec *pvec) pagevec_reinit(pvec); } -/** - * pagevec_lookup_entries - gang pagecache lookup - * @pvec: Where the resulting entries are placed - * @mapping: The address_space to search - * @start: The starting entry index - * @end: The highest index to return (inclusive). 
- * @nr_entries: The maximum number of pages - * @indices: The cache indices corresponding to the entries in @pvec - * - * pagevec_lookup_entries() will search for and return a group of up - * to @nr_pages pages and shadow entries in the mapping. All - * entries are placed in @pvec. pagevec_lookup_entries() takes a - * reference against actual pages in @pvec. - * - * The search returns a group of mapping-contiguous entries with - * ascending indexes. There may be holes in the indices due to - * not-present entries. - * - * Only one subpage of a Transparent Huge Page is returned in one call: - * allowing truncate_inode_pages_range() to evict the whole THP without - * cycling through a pagevec of extra references. - * - * pagevec_lookup_entries() returns the number of entries which were - * found. - */ -unsigned pagevec_lookup_entries(struct pagevec *pvec, - struct address_space *mapping, pgoff_t start, pgoff_t end, - pgoff_t *indices) -{ - return find_get_entries(mapping, start, end, pvec, indices); -} - /** * pagevec_remove_exceptionals - pagevec exceptionals pruning * @pvec: The pagevec to prune * - * pagevec_lookup_entries() fills both pages and exceptional radix - * tree entries into the pagevec. This function prunes all + * find_get_entries() fills both pages and XArray value entries (aka + * exceptional entries) into the pagevec. This function prunes all * exceptionals from @pvec without leaving holes, so that it can be * passed on to page-only pagevec operations. */ diff --git a/mm/truncate.c b/mm/truncate.c index 41e7377ad58d..455944264663 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -376,7 +376,7 @@ void truncate_inode_pages_range(struct address_space *mapping, index = start; for ( ; ; ) { cond_resched(); - if (!pagevec_lookup_entries(&pvec, mapping, index, end - 1, + if (!find_get_entries(mapping, index, end - 1, &pvec, indices)) { /* If all gone from start onwards, we're done */ if (index == start) @@ -638,7 +638,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping, pagevec_init(&pvec); index = start; - while (pagevec_lookup_entries(&pvec, mapping, index, end, indices)) { + while (find_get_entries(mapping, index, end, &pvec, indices)) { for (i = 0; i < pagevec_count(&pvec); i++) { struct page *page = pvec.pages[i]; -- cgit v1.2.3-59-g8ed1b From 164cc4fef4456727466f8e35bb654c3994748070 Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Thu, 25 Feb 2021 17:16:18 -0800 Subject: mm,thp,shmem: limit shmem THP alloc gfp_mask Patch series "mm,thp,shm: limit shmem THP alloc gfp_mask", v6. The allocation flags of anonymous transparent huge pages can be controlled through the files in /sys/kernel/mm/transparent_hugepage/defrag, which can help the system from getting bogged down in the page reclaim and compaction code when many THPs are getting allocated simultaneously. However, the gfp_mask for shmem THP allocations were not limited by those configuration settings, and some workloads ended up with all CPUs stuck on the LRU lock in the page reclaim code, trying to allocate dozens of THPs simultaneously. This patch applies the same configurated limitation of THPs to shmem hugepage allocations, to prevent that from happening. This way a THP defrag setting of "never" or "defer+madvise" will result in quick allocation failures without direct reclaim when no 2MB free pages are available. 
With this patch applied, THP allocations for tmpfs will be a little more aggressive than today for files mmapped with MADV_HUGEPAGE, and a little less aggressive for files that are not mmapped or mapped without that flag. This patch (of 4): The allocation flags of anonymous transparent huge pages can be controlled through the files in /sys/kernel/mm/transparent_hugepage/defrag, which can help the system from getting bogged down in the page reclaim and compaction code when many THPs are getting allocated simultaneously. However, the gfp_mask for shmem THP allocations were not limited by those configuration settings, and some workloads ended up with all CPUs stuck on the LRU lock in the page reclaim code, trying to allocate dozens of THPs simultaneously. This patch applies the same configurated limitation of THPs to shmem hugepage allocations, to prevent that from happening. Controlling the gfp_mask of THP allocations through the knobs in sysfs allows users to determine the balance between how aggressively the system tries to allocate THPs at fault time, and how much the application may end up stalling attempting those allocations. This way a THP defrag setting of "never" or "defer+madvise" will result in quick allocation failures without direct reclaim when no 2MB free pages are available. With this patch applied, THP allocations for tmpfs will be a little more aggressive than today for files mmapped with MADV_HUGEPAGE, and a little less aggressive for files that are not mmapped or mapped without that flag. Link: https://lkml.kernel.org/r/20201124194925.623931-1-riel@surriel.com Link: https://lkml.kernel.org/r/20201124194925.623931-2-riel@surriel.com Signed-off-by: Rik van Riel Acked-by: Michal Hocko Acked-by: Vlastimil Babka Cc: Xu Yu Cc: Mel Gorman Cc: Andrea Arcangeli Cc: Matthew Wilcox (Oracle) Cc: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 2 ++ mm/huge_memory.c | 6 +++--- mm/shmem.c | 8 +++++--- 3 files changed, 10 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 220cd553a9e7..8572a1474e16 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -634,6 +634,8 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask); extern void pm_restrict_gfp_mask(void); extern void pm_restore_gfp_mask(void); +extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma); + #ifdef CONFIG_PM_SLEEP extern bool pm_suspended_storage(void); #else diff --git a/mm/huge_memory.c b/mm/huge_memory.c index d77605c30f2e..395c75111d33 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -668,9 +668,9 @@ release: * available * never: never stall for any thp allocation */ -static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma) +gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma) { - const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE); + const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE); /* Always do synchronous compaction */ if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags)) @@ -762,7 +762,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf) } return ret; } - gfp = alloc_hugepage_direct_gfpmask(vma); + gfp = vma_thp_gfp_mask(vma); page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER); if (unlikely(!page)) { count_vm_event(THP_FAULT_FALLBACK); diff --git a/mm/shmem.c b/mm/shmem.c index ee8f21832f98..596009a44431 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1519,8 +1519,8 @@ static struct page 
*shmem_alloc_hugepage(gfp_t gfp, return NULL; shmem_pseudo_vma_init(&pvma, info, hindex); - page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN, - HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true); + page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), + true); shmem_pseudo_vma_destroy(&pvma); if (page) prep_transhuge_page(page); @@ -1776,6 +1776,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, struct page *page; enum sgp_type sgp_huge = sgp; pgoff_t hindex = index; + gfp_t huge_gfp; int error; int once = 0; int alloced = 0; @@ -1862,7 +1863,8 @@ repeat: } alloc_huge: - page = shmem_alloc_and_acct_page(gfp, inode, index, true); + huge_gfp = vma_thp_gfp_mask(vma); + page = shmem_alloc_and_acct_page(huge_gfp, inode, index, true); if (IS_ERR(page)) { alloc_nohuge: page = shmem_alloc_and_acct_page(gfp, inode, -- cgit v1.2.3-59-g8ed1b From cd89fb06509903f942a0ffe97ffa63034671ed0c Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Thu, 25 Feb 2021 17:16:25 -0800 Subject: mm,thp,shmem: make khugepaged obey tmpfs mount flags Currently if thp enabled=[madvise], mounting a tmpfs filesystem with huge=always and mmapping files from that tmpfs does not result in khugepaged collapsing those mappings, despite the mount flag indicating that it should. Fix that by breaking up the blocks of tests in hugepage_vma_check a little bit, and testing things in the correct order. Link: https://lkml.kernel.org/r/20201124194925.623931-4-riel@surriel.com Fixes: c2231020ea7b ("mm: thp: register mm for khugepaged when merging vma for shmem") Signed-off-by: Rik van Riel Cc: Andrea Arcangeli Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Mel Gorman Cc: Michal Hocko Cc: Vlastimil Babka Cc: Xu Yu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/khugepaged.h | 2 ++ mm/khugepaged.c | 22 ++++++++++++++++------ 2 files changed, 18 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h index c941b7377321..2fcc01891b47 100644 --- a/include/linux/khugepaged.h +++ b/include/linux/khugepaged.h @@ -3,6 +3,7 @@ #define _LINUX_KHUGEPAGED_H #include /* MMF_VM_HUGEPAGE */ +#include #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -57,6 +58,7 @@ static inline int khugepaged_enter(struct vm_area_struct *vma, { if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags)) if ((khugepaged_always() || + (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) || (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) && !(vm_flags & VM_NOHUGEPAGE) && !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 75e246f680f4..a7d6cb912b05 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -442,18 +442,28 @@ static inline int khugepaged_test_exit(struct mm_struct *mm) static bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags) { - if ((!(vm_flags & VM_HUGEPAGE) && !khugepaged_always()) || - (vm_flags & VM_NOHUGEPAGE) || + /* Explicitly disabled through madvise. */ + if ((vm_flags & VM_NOHUGEPAGE) || test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) return false; - if (shmem_file(vma->vm_file) || - (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && - vma->vm_file && - (vm_flags & VM_DENYWRITE))) { + /* Enabled via shmem mount options or sysfs settings. */ + if (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) { return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff, HPAGE_PMD_NR); } + + /* THP settings require madvise. 
*/ + if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always()) + return false; + + /* Read-only file mappings need to be aligned for THP to work. */ + if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file && + (vm_flags & VM_DENYWRITE)) { + return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff, + HPAGE_PMD_NR); + } + if (!vma->anon_vma || vma->vm_ops) return false; if (vma_is_temporary_stack(vma)) -- cgit v1.2.3-59-g8ed1b From 3c381db1fac80373f2cc0d8c1d0bcfbf8bd4fb57 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 25 Feb 2021 17:16:40 -0800 Subject: mm/page_alloc: count CMA pages per zone and print them in /proc/zoneinfo Let's count the number of CMA pages per zone and print them in /proc/zoneinfo. Having access to the total number of CMA pages per zone is helpful for debugging purposes to know where exactly the CMA pages ended up, and to figure out how many pages of a zone might behave differently, even after some of these pages might already have been allocated. As one example, CMA pages part of a kernel zone cannot be used for ordinary kernel allocations but instead behave more like ZONE_MOVABLE. For now, we are only able to get the global nr+free cma pages from /proc/meminfo and the free cma pages per zone from /proc/zoneinfo. Example after this patch when booting a 6 GiB QEMU VM with "hugetlb_cma=2G": # cat /proc/zoneinfo | grep cma cma 0 nr_free_cma 0 cma 0 nr_free_cma 0 cma 524288 nr_free_cma 493016 cma 0 cma 0 # cat /proc/meminfo | grep Cma CmaTotal: 2097152 kB CmaFree: 1972064 kB Note: We print even without CONFIG_CMA, just like "nr_free_cma"; this way, one can be sure when spotting "cma 0", that there are definetly no CMA pages located in a zone. [david@redhat.com: v2] Link: https://lkml.kernel.org/r/20210128164533.18566-1-david@redhat.com [david@redhat.com: v3] Link: https://lkml.kernel.org/r/20210129113451.22085-1-david@redhat.com Link: https://lkml.kernel.org/r/20210127101813.6370-3-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Oscar Salvador Acked-by: David Rientjes Cc: Thomas Gleixner Cc: "Peter Zijlstra (Intel)" Cc: Mike Rapoport Cc: Michal Hocko Cc: Wei Yang Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 15 +++++++++++++++ mm/page_alloc.c | 1 + mm/vmstat.c | 6 ++++-- 3 files changed, 20 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 9198b7ade85f..5f9c4dad73ed 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -503,6 +503,9 @@ struct zone { * bootmem allocator): * managed_pages = present_pages - reserved_pages; * + * cma pages is present pages that are assigned for CMA use + * (MIGRATE_CMA). + * * So present_pages may be used by memory hotplug or memory power * management logic to figure out unmanaged pages by checking * (present_pages - managed_pages). 
And managed_pages should be used @@ -527,6 +530,9 @@ struct zone { atomic_long_t managed_pages; unsigned long spanned_pages; unsigned long present_pages; +#ifdef CONFIG_CMA + unsigned long cma_pages; +#endif const char *name; @@ -624,6 +630,15 @@ static inline unsigned long zone_managed_pages(struct zone *zone) return (unsigned long)atomic_long_read(&zone->managed_pages); } +static inline unsigned long zone_cma_pages(struct zone *zone) +{ +#ifdef CONFIG_CMA + return zone->cma_pages; +#else + return 0; +#endif +} + static inline unsigned long zone_end_pfn(const struct zone *zone) { return zone->zone_start_pfn + zone->spanned_pages; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ddccc59f2f72..3e4b29ee2b1e 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2168,6 +2168,7 @@ void __init init_cma_reserved_pageblock(struct page *page) } adjust_managed_page_count(page, pageblock_nr_pages); + page_zone(page)->cma_pages += pageblock_nr_pages; } #endif diff --git a/mm/vmstat.c b/mm/vmstat.c index a0e949542204..6cdf789ced5e 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1637,14 +1637,16 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, "\n high %lu" "\n spanned %lu" "\n present %lu" - "\n managed %lu", + "\n managed %lu" + "\n cma %lu", zone_page_state(zone, NR_FREE_PAGES), min_wmark_pages(zone), low_wmark_pages(zone), high_wmark_pages(zone), zone->spanned_pages, zone->present_pages, - zone_managed_pages(zone)); + zone_managed_pages(zone), + zone_cma_pages(zone)); seq_printf(m, "\n protection: (%ld", -- cgit v1.2.3-59-g8ed1b From 629484ae73754243917e06d8d5e5f37c26e99399 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Thu, 25 Feb 2021 17:16:51 -0800 Subject: mm: vmstat: add some comments on internal storage of byte items Byte-accounted items are used for slab object accounting at the cgroup level, because the objects in a slab page can belong to different cgroups. At the global level these items always change in multiples of whole slab pages. The vmstat code exploits this and stores these items as pages internally, which allows for more compact per-cpu data. This optimization isn't self-evident from the asserts and the division in the stat update functions. Provide the reader with some context. Link: https://lkml.kernel.org/r/20210202184411.118614-1-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/vmstat.h | 6 ++++++ mm/vmstat.c | 12 ++++++++++++ 2 files changed, 18 insertions(+) (limited to 'include/linux') diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 773135fc6e19..506d625163a1 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -313,6 +313,12 @@ static inline void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item, int delta) { if (vmstat_item_in_bytes(item)) { + /* + * Only cgroups use subpage accounting right now; at + * the global level, these items still change in + * multiples of whole pages. Store them as pages + * internally to keep the per-cpu counters compact. 
+ */ VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1)); delta >>= PAGE_SHIFT; } diff --git a/mm/vmstat.c b/mm/vmstat.c index 0b0fc3b77789..e60b36f5f0a9 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -342,6 +342,12 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item, long t; if (vmstat_item_in_bytes(item)) { + /* + * Only cgroups use subpage accounting right now; at + * the global level, these items still change in + * multiples of whole pages. Store them as pages + * internally to keep the per-cpu counters compact. + */ VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1)); delta >>= PAGE_SHIFT; } @@ -551,6 +557,12 @@ static inline void mod_node_state(struct pglist_data *pgdat, long o, n, t, z; if (vmstat_item_in_bytes(item)) { + /* + * Only cgroups use subpage accounting right now; at + * the global level, these items still change in + * multiples of whole pages. Store them as pages + * internally to keep the per-cpu counters compact. + */ VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1)); delta >>= PAGE_SHIFT; } -- cgit v1.2.3-59-g8ed1b From 9f605f260594f99b950062fd62244251e85dbd2b Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 25 Feb 2021 17:16:57 -0800 Subject: mm: move pfn_to_online_page() out of line Patch series "mm: Fix pfn_to_online_page() with respect to ZONE_DEVICE", v4. A pfn-walker that uses pfn_to_online_page() may inadvertently translate a pfn as online and in the page allocator, when it is offline managed by a ZONE_DEVICE mapping (details in Patch 3: ("mm: Teach pfn_to_online_page() about ZONE_DEVICE section collisions")). The 2 proposals under consideration are teach pfn_to_online_page() to be precise in the presence of mixed-zone sections, or teach the memory-add code to drop the System RAM associated with ZONE_DEVICE collisions. In order to not regress memory capacity by a few 10s to 100s of MiB the approach taken in this set is to add precision to pfn_to_online_page(). In the course of validating pfn_to_online_page() a couple other fixes fell out: 1/ soft_offline_page() fails to drop the reference taken in the madvise(..., MADV_SOFT_OFFLINE) case. 2/ memory_failure() uses get_dev_pagemap() to lookup ZONE_DEVICE pages, however that mapping may contain data pages and metadata raw pfns. Introduce pgmap_pfn_valid() to delineate the 2 types and fail the handling of raw metadata pfns. This patch (of 4); pfn_to_online_page() is already too large to be a macro or an inline function. In anticipation of further logic changes / growth, move it out of line. No functional change, just code movement. Link: https://lkml.kernel.org/r/161058499000.1840162.702316708443239771.stgit@dwillia2-desk3.amr.corp.intel.com Link: https://lkml.kernel.org/r/161058499608.1840162.10165648147615238793.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams Reported-by: Michal Hocko Acked-by: Michal Hocko Reviewed-by: David Hildenbrand Reviewed-by: Oscar Salvador Cc: Naoya Horiguchi Cc: Qian Cai Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memory_hotplug.h | 17 +---------------- mm/memory_hotplug.c | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 15acce5ab106..3d99de0db2dd 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -16,22 +16,7 @@ struct resource; struct vmem_altmap; #ifdef CONFIG_MEMORY_HOTPLUG -/* - * Return page for the valid pfn only if the page is online. 
All pfn - * walkers which rely on the fully initialized page->flags and others - * should use this rather than pfn_valid && pfn_to_page - */ -#define pfn_to_online_page(pfn) \ -({ \ - struct page *___page = NULL; \ - unsigned long ___pfn = pfn; \ - unsigned long ___nr = pfn_to_section_nr(___pfn); \ - \ - if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \ - pfn_valid_within(___pfn)) \ - ___page = pfn_to_page(___pfn); \ - ___page; \ -}) +struct page *pfn_to_online_page(unsigned long pfn); /* * Types for free bootmem stored in page->lru.next. These have to be in diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index abe43c1ae920..fc6cdd99941b 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -300,6 +300,22 @@ static int check_hotplug_memory_addressable(unsigned long pfn, return 0; } +/* + * Return page for the valid pfn only if the page is online. All pfn + * walkers which rely on the fully initialized page->flags and others + * should use this rather than pfn_valid && pfn_to_page + */ +struct page *pfn_to_online_page(unsigned long pfn) +{ + unsigned long nr = pfn_to_section_nr(pfn); + + if (nr < NR_MEM_SECTIONS && online_section_nr(nr) && + pfn_valid_within(pfn)) + return pfn_to_page(pfn); + return NULL; +} +EXPORT_SYMBOL_GPL(pfn_to_online_page); + /* * Reasonably generic function for adding memory. It is * expected that archs that support memory hotplug will -- cgit v1.2.3-59-g8ed1b From 1f90a3477df3ff1a91e064af554cdc887c8f9e5e Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 25 Feb 2021 17:17:05 -0800 Subject: mm: teach pfn_to_online_page() about ZONE_DEVICE section collisions While pfn_to_online_page() is able to determine pfn_valid() at subsection granularity it is not able to reliably determine if a given pfn is also online if the section is mixes ZONE_{NORMAL,MOVABLE} with ZONE_DEVICE. This means that pfn_to_online_page() may return invalid @page objects. For example with a memory map like: 100000000-1fbffffff : System RAM 142000000-143002e16 : Kernel code 143200000-143713fff : Kernel rodata 143800000-143b15b7f : Kernel data 144227000-144ffffff : Kernel bss 1fc000000-2fbffffff : Persistent Memory (legacy) 1fc000000-2fbffffff : namespace0.0 This command: echo 0x1fc000000 > /sys/devices/system/memory/soft_offline_page ...succeeds when it should fail. When it succeeds it touches an uninitialized page and may crash or cause other damage (see dissolve_free_huge_page()). While the memory map above is contrived via the memmap=ss!nn kernel command line option, the collision happens in practice on shipping platforms. The memory controller resources that decode spans of physical address space are a limited resource. One technique platform-firmware uses to conserve those resources is to share a decoder across 2 devices to keep the address range contiguous. Unfortunately the unit of operation of a decoder is 64MiB while the Linux section size is 128MiB. This results in situations where, without subsection hotplug memory mappings with different lifetimes collide into one object that can only express one lifetime. Update move_pfn_range_to_zone() to flag (SECTION_TAINT_ZONE_DEVICE) a section that mixes ZONE_DEVICE pfns with other online pfns. With SECTION_TAINT_ZONE_DEVICE to delineate, pfn_to_online_page() can fall back to a slow-path check for ZONE_DEVICE pfns in an online section. In the fast path online_section() for a full ZONE_DEVICE section returns false. 
Because the collision case is rare, and for simplicity, the SECTION_TAINT_ZONE_DEVICE flag is never cleared once set. [dan.j.williams@intel.com: fix CONFIG_ZONE_DEVICE=n build] Link: https://lkml.kernel.org/r/CAPcyv4iX+7LAgAeSqx7Zw-Zd=ZV9gBv8Bo7oTbwCOOqJoZ3+Yg@mail.gmail.com Link: https://lkml.kernel.org/r/161058500675.1840162.7887862152161279354.stgit@dwillia2-desk3.amr.corp.intel.com Fixes: ba72b4c8cf60 ("mm/sparsemem: support sub-section hotplug") Signed-off-by: Dan Williams Reported-by: Michal Hocko Acked-by: Michal Hocko Reported-by: David Hildenbrand Reviewed-by: David Hildenbrand Reviewed-by: Oscar Salvador Cc: Naoya Horiguchi Cc: Qian Cai Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 34 +++++++++++++++++++++++++++------- mm/memory_hotplug.c | 38 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 65 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 5f9c4dad73ed..47946cec7584 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -918,6 +918,18 @@ static inline int local_memory_node(int node_id) { return node_id; }; */ #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones) +#ifdef CONFIG_ZONE_DEVICE +static inline bool zone_is_zone_device(struct zone *zone) +{ + return zone_idx(zone) == ZONE_DEVICE; +} +#else +static inline bool zone_is_zone_device(struct zone *zone) +{ + return false; +} +#endif + /* * Returns true if a zone has pages managed by the buddy allocator. * All the reclaim decisions have to use this function rather than @@ -1306,13 +1318,14 @@ extern size_t mem_section_usage_size(void); * which results in PFN_SECTION_SHIFT equal 6. * To sum it up, at least 6 bits are available. */ -#define SECTION_MARKED_PRESENT (1UL<<0) -#define SECTION_HAS_MEM_MAP (1UL<<1) -#define SECTION_IS_ONLINE (1UL<<2) -#define SECTION_IS_EARLY (1UL<<3) -#define SECTION_MAP_LAST_BIT (1UL<<4) -#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1)) -#define SECTION_NID_SHIFT 3 +#define SECTION_MARKED_PRESENT (1UL<<0) +#define SECTION_HAS_MEM_MAP (1UL<<1) +#define SECTION_IS_ONLINE (1UL<<2) +#define SECTION_IS_EARLY (1UL<<3) +#define SECTION_TAINT_ZONE_DEVICE (1UL<<4) +#define SECTION_MAP_LAST_BIT (1UL<<5) +#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1)) +#define SECTION_NID_SHIFT 3 static inline struct page *__section_mem_map_addr(struct mem_section *section) { @@ -1351,6 +1364,13 @@ static inline int online_section(struct mem_section *section) return (section && (section->section_mem_map & SECTION_IS_ONLINE)); } +static inline int online_device_section(struct mem_section *section) +{ + unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE; + + return section && ((section->section_mem_map & flags) == flags); +} + static inline int online_section_nr(unsigned long nr) { return online_section(__nr_to_section(nr)); diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 02378f11e2d6..3af4d3851d1a 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -308,6 +308,7 @@ static int check_hotplug_memory_addressable(unsigned long pfn, struct page *pfn_to_online_page(unsigned long pfn) { unsigned long nr = pfn_to_section_nr(pfn); + struct dev_pagemap *pgmap; struct mem_section *ms; if (nr >= NR_MEM_SECTIONS) @@ -327,6 +328,22 @@ struct page *pfn_to_online_page(unsigned long pfn) if (!pfn_section_valid(ms, pfn)) return NULL; + if (!online_device_section(ms)) + return pfn_to_page(pfn); + + /* + * Slowpath: when ZONE_DEVICE collides with + * 
ZONE_{NORMAL,MOVABLE} within the same section some pfns in + * the section may be 'offline' but 'valid'. Only + * get_dev_pagemap() can determine sub-section online status. + */ + pgmap = get_dev_pagemap(pfn, NULL); + put_dev_pagemap(pgmap); + + /* The presence of a pgmap indicates ZONE_DEVICE offline pfn */ + if (pgmap) + return NULL; + return pfn_to_page(pfn); } EXPORT_SYMBOL_GPL(pfn_to_online_page); @@ -709,6 +726,14 @@ static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned lon pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn; } + +static void section_taint_zone_device(unsigned long pfn) +{ + struct mem_section *ms = __pfn_to_section(pfn); + + ms->section_mem_map |= SECTION_TAINT_ZONE_DEVICE; +} + /* * Associate the pfn range with the given zone, initializing the memmaps * and resizing the pgdat/zone data to span the added pages. After this @@ -738,6 +763,19 @@ void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, resize_pgdat_range(pgdat, start_pfn, nr_pages); pgdat_resize_unlock(pgdat, &flags); + /* + * Subsection population requires care in pfn_to_online_page(). + * Set the taint to enable the slow path detection of + * ZONE_DEVICE pages in an otherwise ZONE_{NORMAL,MOVABLE} + * section. + */ + if (zone_is_zone_device(zone)) { + if (!IS_ALIGNED(start_pfn, PAGES_PER_SECTION)) + section_taint_zone_device(start_pfn); + if (!IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION)) + section_taint_zone_device(start_pfn + nr_pages); + } + /* * TODO now we have a visible range of pages which are not associated * with their zone properly. Not nice but set_pfnblock_flags_mask -- cgit v1.2.3-59-g8ed1b From 34dc45be4563f344d59ba0428416d0d265aa4f4d Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 25 Feb 2021 17:17:08 -0800 Subject: mm: fix memory_failure() handling of dax-namespace metadata Given 'struct dev_pagemap' spans both data pages and metadata pages be careful to consult the altmap if present to delineate metadata. In fact the pfn_first() helper already identifies the first valid data pfn, so export that helper for other code paths via pgmap_pfn_valid(). Other usage of get_dev_pagemap() are not a concern because those are operating on known data pfns having been looked up by get_user_pages(). I.e. metadata pfns are never user mapped. 
Link: https://lkml.kernel.org/r/161058501758.1840162.4239831989762604527.stgit@dwillia2-desk3.amr.corp.intel.com Fixes: 6100e34b2526 ("mm, memory_failure: Teach memory_failure() about dev_pagemap pages") Signed-off-by: Dan Williams Reported-by: David Hildenbrand Reviewed-by: David Hildenbrand Reviewed-by: Naoya Horiguchi Cc: Michal Hocko Cc: Oscar Salvador Cc: Qian Cai Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memremap.h | 6 ++++++ mm/memory-failure.c | 6 ++++++ mm/memremap.c | 15 +++++++++++++++ 3 files changed, 27 insertions(+) (limited to 'include/linux') diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 79c49e7f5c30..f5b464daeeca 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -137,6 +137,7 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap); void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap); struct dev_pagemap *get_dev_pagemap(unsigned long pfn, struct dev_pagemap *pgmap); +bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn); unsigned long vmem_altmap_offset(struct vmem_altmap *altmap); void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns); @@ -165,6 +166,11 @@ static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn, return NULL; } +static inline bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn) +{ + return false; +} + static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap) { return 0; diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 55c671904aac..24210c9bd843 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1312,6 +1312,12 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags, */ put_page(page); + /* device metadata space is not recoverable */ + if (!pgmap_pfn_valid(pgmap, pfn)) { + rc = -ENXIO; + goto out; + } + /* * Prevent the inode from being freed while we are interrogating * the address_space, typically this would be handled by diff --git a/mm/memremap.c b/mm/memremap.c index 16b2fb482da1..2455bac89506 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -80,6 +80,21 @@ static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id) return pfn + vmem_altmap_offset(pgmap_altmap(pgmap)); } +bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn) +{ + int i; + + for (i = 0; i < pgmap->nr_range; i++) { + struct range *range = &pgmap->ranges[i]; + + if (pfn >= PHYS_PFN(range->start) && + pfn <= PHYS_PFN(range->end)) + return pfn >= pfn_first(pgmap, i); + } + + return false; +} + static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id) { const struct range *range = &pgmap->ranges[range_id]; -- cgit v1.2.3-59-g8ed1b From 1adf8b468ff6bc64ba01ce3848da4bcf409215b4 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 25 Feb 2021 17:17:13 -0800 Subject: mm/memory_hotplug: rename all existing 'memhp' into 'mhp' This renames all 'memhp' instances to 'mhp' except for memhp_default_state for being a kernel command line option. This is just a clean up and should not cause a functional change. Let's make it consistent rater than mixing the two prefixes. In preparation for more users of the 'mhp' terminology. Link: https://lkml.kernel.org/r/1611554093-27316-1-git-send-email-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Suggested-by: David Hildenbrand Reviewed-by: David Hildenbrand Cc: Greg Kroah-Hartman Cc: "Rafael J. 
Wysocki" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/base/memory.c | 10 +++++----- include/linux/memory_hotplug.h | 4 ++-- mm/memory_hotplug.c | 12 ++++++------ 3 files changed, 13 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/memory.c b/drivers/base/memory.c index eef4ffb6122c..901e379676be 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -35,7 +35,7 @@ static const char *const online_type_to_str[] = { [MMOP_ONLINE_MOVABLE] = "online_movable", }; -int memhp_online_type_from_str(const char *str) +int mhp_online_type_from_str(const char *str) { int i; @@ -253,7 +253,7 @@ static int memory_subsys_offline(struct device *dev) static ssize_t state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - const int online_type = memhp_online_type_from_str(buf); + const int online_type = mhp_online_type_from_str(buf); struct memory_block *mem = to_memory_block(dev); int ret; @@ -387,19 +387,19 @@ static ssize_t auto_online_blocks_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%s\n", - online_type_to_str[memhp_default_online_type]); + online_type_to_str[mhp_default_online_type]); } static ssize_t auto_online_blocks_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - const int online_type = memhp_online_type_from_str(buf); + const int online_type = mhp_online_type_from_str(buf); if (online_type < 0) return -EINVAL; - memhp_default_online_type = online_type; + mhp_default_online_type = online_type; return count; } diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 3d99de0db2dd..ca5e8d137726 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -116,10 +116,10 @@ extern int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params); extern u64 max_mem_size; -extern int memhp_online_type_from_str(const char *str); +extern int mhp_online_type_from_str(const char *str); /* Default online_type (MMOP_*) when new memory blocks are added. 
*/ -extern int memhp_default_online_type; +extern int mhp_default_online_type; /* If movable_node boot option specified */ extern bool movable_node_enabled; static inline bool movable_node_is_enabled(void) diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 3af4d3851d1a..ac1c686a5989 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -67,17 +67,17 @@ void put_online_mems(void) bool movable_node_enabled = false; #ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE -int memhp_default_online_type = MMOP_OFFLINE; +int mhp_default_online_type = MMOP_OFFLINE; #else -int memhp_default_online_type = MMOP_ONLINE; +int mhp_default_online_type = MMOP_ONLINE; #endif static int __init setup_memhp_default_state(char *str) { - const int online_type = memhp_online_type_from_str(str); + const int online_type = mhp_online_type_from_str(str); if (online_type >= 0) - memhp_default_online_type = online_type; + mhp_default_online_type = online_type; return 1; } @@ -1076,7 +1076,7 @@ static int check_hotplug_memory_range(u64 start, u64 size) static int online_memory_block(struct memory_block *mem, void *arg) { - mem->online_type = memhp_default_online_type; + mem->online_type = mhp_default_online_type; return device_online(&mem->dev); } @@ -1157,7 +1157,7 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags) merge_system_ram_resource(res); /* online pages if requested */ - if (memhp_default_online_type != MMOP_OFFLINE) + if (mhp_default_online_type != MMOP_OFFLINE) walk_memory_blocks(start, size, NULL, online_memory_block); return ret; -- cgit v1.2.3-59-g8ed1b From 26011267e1a7ddaab50b5f81b402ca3e7fc2887c Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 25 Feb 2021 17:17:17 -0800 Subject: mm/memory_hotplug: MEMHP_MERGE_RESOURCE -> MHP_MERGE_RESOURCE Let's make "MEMHP_MERGE_RESOURCE" consistent with "MHP_NONE", "mhp_t" and "mhp_flags". As discussed recently [1], "mhp" is our internal acronym for memory hotplug now. [1] https://lore.kernel.org/linux-mm/c37de2d0-28a1-4f7d-f944-cfd7d81c334d@redhat.com/ Link: https://lkml.kernel.org/r/20210126115829.10909-1-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Miaohe Lin Acked-by: Michael S. Tsirkin Reviewed-by: Oscar Salvador Acked-by: Wei Liu Reviewed-by: Pankaj Gupta Cc: "K. Y. 
Srinivasan" Cc: Haiyang Zhang Cc: Stephen Hemminger Cc: Jason Wang Cc: Boris Ostrovsky Cc: Juergen Gross Cc: Stefano Stabellini Cc: Michal Hocko Cc: Anshuman Khandual Cc: Wei Yang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/hv/hv_balloon.c | 2 +- drivers/virtio/virtio_mem.c | 2 +- drivers/xen/balloon.c | 2 +- include/linux/memory_hotplug.h | 2 +- mm/memory_hotplug.c | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index 8c471823a5af..2f776d78e3c1 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c @@ -726,7 +726,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size, nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn)); ret = add_memory(nid, PFN_PHYS((start_pfn)), - (HA_CHUNK << PAGE_SHIFT), MEMHP_MERGE_RESOURCE); + (HA_CHUNK << PAGE_SHIFT), MHP_MERGE_RESOURCE); if (ret) { pr_err("hot_add memory failed error is %d\n", ret); diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index 9fc9ec4a25f5..d44e43869f17 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -623,7 +623,7 @@ static int virtio_mem_add_memory(struct virtio_mem *vm, uint64_t addr, /* Memory might get onlined immediately. */ atomic64_add(size, &vm->offline_size); rc = add_memory_driver_managed(vm->nid, addr, size, vm->resource_name, - MEMHP_MERGE_RESOURCE); + MHP_MERGE_RESOURCE); if (rc) { atomic64_sub(size, &vm->offline_size); dev_warn(&vm->vdev->dev, "adding memory failed: %d\n", rc); diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index b57b2067ecbf..671c71245a7b 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -331,7 +331,7 @@ static enum bp_state reserve_additional_memory(void) mutex_unlock(&balloon_mutex); /* add_memory_resource() requires the device_hotplug lock */ lock_device_hotplug(); - rc = add_memory_resource(nid, resource, MEMHP_MERGE_RESOURCE); + rc = add_memory_resource(nid, resource, MHP_MERGE_RESOURCE); unlock_device_hotplug(); mutex_lock(&balloon_mutex); diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index ca5e8d137726..08eeef679ab7 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -53,7 +53,7 @@ typedef int __bitwise mhp_t; * with this flag set, the resource pointer must no longer be used as it * might be stale, or the resource might have changed. */ -#define MEMHP_MERGE_RESOURCE ((__force mhp_t)BIT(0)) +#define MHP_MERGE_RESOURCE ((__force mhp_t)BIT(0)) /* * Extended parameters for memory hotplug: diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index ac1c686a5989..6a02c3f42717 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1153,7 +1153,7 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags) * In case we're allowed to merge the resource, flag it and trigger * merging now that adding succeeded. */ - if (mhp_flags & MEMHP_MERGE_RESOURCE) + if (mhp_flags & MHP_MERGE_RESOURCE) merge_system_ram_resource(res); /* online pages if requested */ -- cgit v1.2.3-59-g8ed1b From e9a2e48e8704c9d20a625c6f2357147d03ea7b97 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 25 Feb 2021 17:17:24 -0800 Subject: drivers/base/memory: don't store phys_device in memory blocks No need to store the value for each and every memory block, as we can easily query the value at runtime. Reshuffle the members to optimize the memory layout. 
Also, let's clarify what the interface once was used for and why it's legacy nowadays. "phys_device" was used on s390x in older versions of lsmem[2]/chmem[3], back when they were still part of s390x-tools. They were later replaced by the variants in linux-utils. For example, RHEL6 and RHEL7 contain lsmem/chmem from s390-utils. RHEL8 switched to versions from util-linux on s390x [4]. "phys_device" was added with sysfs support for memory hotplug in commit 3947be1969a9 ("[PATCH] memory hotplug: sysfs and add/remove functions") in 2005. It always returned 0. s390x started returning something != 0 on some setups (if sclp.rzm is set by HW) in 2010 via commit 57b552ba0b2f ("memory hotplug/s390: set phys_device"). For s390x, it allowed for identifying which memory block devices belong to the same storage increment (RZM). Only if all memory block devices comprising a single storage increment were offline, the memory could actually be removed in the hypervisor. Since commit e5d709bb5fb7 ("s390/memory hotplug: provide memory_block_size_bytes() function") in 2013 a memory block device spans at least one storage increment - which is why the interface isn't really helpful/used anymore (except by old lsmem/chmem tools). There were once RFC patches to make use of "phys_device" in ACPI context; however, the underlying problem could be solved using different interfaces [1]. [1] https://patchwork.kernel.org/patch/2163871/ [2] https://github.com/ibm-s390-tools/s390-tools/blob/v2.1.0/zconf/lsmem [3] https://github.com/ibm-s390-tools/s390-tools/blob/v2.1.0/zconf/chmem [4] https://bugzilla.redhat.com/show_bug.cgi?id=1504134 Link: https://lkml.kernel.org/r/20210201181347.13262-2-david@redhat.com Signed-off-by: David Hildenbrand Acked-by: Michal Hocko Reviewed-by: Oscar Salvador Cc: Dave Hansen Cc: Greg Kroah-Hartman Cc: Gerald Schaefer Cc: Jonathan Corbet Cc: "Rafael J. Wysocki" Cc: Mauro Carvalho Chehab Cc: Ilya Dryomov Cc: Vaibhav Jain Cc: Tom Rix Cc: Geert Uytterhoeven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/ABI/testing/sysfs-devices-memory | 5 +++-- Documentation/admin-guide/mm/memory-hotplug.rst | 4 ++-- drivers/base/memory.c | 25 +++++++++---------------- include/linux/memory.h | 3 +-- 4 files changed, 15 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/Documentation/ABI/testing/sysfs-devices-memory b/Documentation/ABI/testing/sysfs-devices-memory index 246a45b96d22..58dbc592bc57 100644 --- a/Documentation/ABI/testing/sysfs-devices-memory +++ b/Documentation/ABI/testing/sysfs-devices-memory @@ -26,8 +26,9 @@ Date: September 2008 Contact: Badari Pulavarty Description: The file /sys/devices/system/memory/memoryX/phys_device - is read-only and is designed to show the name of physical - memory device. Implementation is currently incomplete. + is read-only; it is a legacy interface only ever used on s390x + to expose the covered storage increment. +Users: Legacy s390-tools lsmem/chmem What: /sys/devices/system/memory/memoryX/phys_index Date: September 2008 diff --git a/Documentation/admin-guide/mm/memory-hotplug.rst b/Documentation/admin-guide/mm/memory-hotplug.rst index 5c4432c96c4b..245739f55ac7 100644 --- a/Documentation/admin-guide/mm/memory-hotplug.rst +++ b/Documentation/admin-guide/mm/memory-hotplug.rst @@ -160,8 +160,8 @@ Under each memory block, you can see 5 files: "online_movable", "online", "offline" command which will be performed on all sections in the block. 
-``phys_device`` read-only: designed to show the name of physical memory - device. This is not well implemented now. +``phys_device`` read-only: legacy interface only ever used on s390x to + expose the covered storage increment. ``removable`` read-only: contains an integer value indicating whether the memory block is removable or not removable. A value of 1 indicates that the memory diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 901e379676be..f35298425575 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -290,20 +290,20 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr, } /* - * phys_device is a bad name for this. What I really want - * is a way to differentiate between memory ranges that - * are part of physical devices that constitute - * a complete removable unit or fru. - * i.e. do these ranges belong to the same physical device, - * s.t. if I offline all of these sections I can then - * remove the physical device? + * Legacy interface that we cannot remove: s390x exposes the storage increment + * covered by a memory block, allowing for identifying which memory blocks + * comprise a storage increment. Since a memory block spans complete + * storage increments nowadays, this interface is basically unused. Other + * archs never exposed != 0. */ static ssize_t phys_device_show(struct device *dev, struct device_attribute *attr, char *buf) { struct memory_block *mem = to_memory_block(dev); + unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr); - return sysfs_emit(buf, "%d\n", mem->phys_device); + return sysfs_emit(buf, "%d\n", + arch_get_memory_phys_device(start_pfn)); } #ifdef CONFIG_MEMORY_HOTREMOVE @@ -488,11 +488,7 @@ static DEVICE_ATTR_WO(soft_offline_page); static DEVICE_ATTR_WO(hard_offline_page); #endif -/* - * Note that phys_device is optional. It is here to allow for - * differentiation between which *physical* devices each - * section belongs to... - */ +/* See phys_device_show(). */ int __weak arch_get_memory_phys_device(unsigned long start_pfn) { return 0; @@ -574,7 +570,6 @@ int register_memory(struct memory_block *memory) static int init_memory_block(unsigned long block_id, unsigned long state) { struct memory_block *mem; - unsigned long start_pfn; int ret = 0; mem = find_memory_block_by_id(block_id); @@ -588,8 +583,6 @@ static int init_memory_block(unsigned long block_id, unsigned long state) mem->start_section_nr = block_id * sections_per_block; mem->state = state; - start_pfn = section_nr_to_pfn(mem->start_section_nr); - mem->phys_device = arch_get_memory_phys_device(start_pfn); mem->nid = NUMA_NO_NODE; ret = register_memory(mem); diff --git a/include/linux/memory.h b/include/linux/memory.h index 439a89e758d8..4da95e684e20 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -27,9 +27,8 @@ struct memory_block { unsigned long start_section_nr; unsigned long state; /* serialized by the dev->lock */ int online_type; /* for passing data to online routine */ - int phys_device; /* to which fru does this belong? */ - struct device dev; int nid; /* NID for this memory block */ + struct device dev; }; int arch_get_memory_phys_device(unsigned long start_pfn); -- cgit v1.2.3-59-g8ed1b From bca3feaa0764ab5a4cbe6817871601f1d00c059d Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 25 Feb 2021 17:17:33 -0800 Subject: mm/memory_hotplug: prevalidate the address range being added with platform Patch series "mm/memory_hotplug: Pre-validate the address range with platform", v5. 
This series adds a mechanism allowing platforms to weigh in and prevalidate incoming address range before proceeding further with the memory hotplug. This helps prevent potential platform errors for the given address range, down the hotplug call chain, which inevitably fails the hotplug itself. This mechanism was suggested by David Hildenbrand during another discussion with respect to a memory hotplug fix on arm64 platform. https://lore.kernel.org/linux-arm-kernel/1600332402-30123-1-git-send-email-anshuman.khandual@arm.com/ This mechanism focuses on the addressibility aspect and not [sub] section alignment aspect. Hence check_hotplug_memory_range() and check_pfn_span() have been left unchanged. This patch (of 4): This introduces mhp_range_allowed() which can be called in various memory hotplug paths to prevalidate the address range which is being added, with the platform. Then mhp_range_allowed() calls mhp_get_pluggable_range() which provides applicable address range depending on whether linear mapping is required or not. For ranges that require linear mapping, it calls a new arch callback arch_get_mappable_range() which the platform can override. So the new callback, in turn provides the platform an opportunity to configure acceptable memory hotplug address ranges in case there are constraints. This mechanism will help prevent platform specific errors deep down during hotplug calls. This drops now redundant check_hotplug_memory_addressable() check in __add_pages() but instead adds a VM_BUG_ON() check which would ensure that the range has been validated with mhp_range_allowed() earlier in the call chain. Besides mhp_get_pluggable_range() also can be used by potential memory hotplug callers to avail the allowed physical range which would go through on a given platform. This does not really add any new range check in generic memory hotplug but instead compensates for lost checks in arch_add_memory() where applicable and check_hotplug_memory_addressable(), with unified mhp_range_allowed(). [akpm@linux-foundation.org: make pagemap_range() return -EINVAL when mhp_range_allowed() fails] Link: https://lkml.kernel.org/r/1612149902-7867-1-git-send-email-anshuman.khandual@arm.com Link: https://lkml.kernel.org/r/1612149902-7867-2-git-send-email-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Suggested-by: David Hildenbrand Reviewed-by: David Hildenbrand Reviewed-by: Oscar Salvador Cc: Heiko Carstens Cc: Catalin Marinas Cc: Vasily Gorbik # s390 Cc: Will Deacon Cc: Ard Biesheuvel Cc: Mark Rutland Cc: Jason Wang Cc: Jonathan Cameron Cc: "Michael S. Tsirkin" Cc: Michal Hocko Cc: Pankaj Gupta Cc: Pankaj Gupta Cc: teawater Cc: Wei Yang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memory_hotplug.h | 10 ++++++ mm/memory_hotplug.c | 78 ++++++++++++++++++++++++++++++++---------- mm/memremap.c | 8 ++++- 3 files changed, 76 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 08eeef679ab7..7288aa5ef73b 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -66,6 +66,9 @@ struct mhp_params { pgprot_t pgprot; }; +bool mhp_range_allowed(u64 start, u64 size, bool need_mapping); +struct range mhp_get_pluggable_range(bool need_mapping); + /* * Zone resizing functions * @@ -266,6 +269,13 @@ static inline bool movable_node_is_enabled(void) } #endif /* ! 
CONFIG_MEMORY_HOTPLUG */ +/* + * Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some + * platforms might override and use arch_get_mappable_range() + * for internal non memory hotplug purposes. + */ +struct range arch_get_mappable_range(void); + #if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT) /* * pgdat resizing functions diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index a969463bdda4..5ba51a8bdaeb 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -107,6 +107,9 @@ static struct resource *register_memory_resource(u64 start, u64 size, if (strcmp(resource_name, "System RAM")) flags |= IORESOURCE_SYSRAM_DRIVER_MANAGED; + if (!mhp_range_allowed(start, size, true)) + return ERR_PTR(-E2BIG); + /* * Make sure value parsed from 'mem=' only restricts memory adding * while booting, so that memory hotplug won't be impacted. Please @@ -284,22 +287,6 @@ static int check_pfn_span(unsigned long pfn, unsigned long nr_pages, return 0; } -static int check_hotplug_memory_addressable(unsigned long pfn, - unsigned long nr_pages) -{ - const u64 max_addr = PFN_PHYS(pfn + nr_pages) - 1; - - if (max_addr >> MAX_PHYSMEM_BITS) { - const u64 max_allowed = (1ull << (MAX_PHYSMEM_BITS + 1)) - 1; - WARN(1, - "Hotplugged memory exceeds maximum addressable address, range=%#llx-%#llx, maximum=%#llx\n", - (u64)PFN_PHYS(pfn), max_addr, max_allowed); - return -E2BIG; - } - - return 0; -} - /* * Return page for the valid pfn only if the page is online. All pfn * walkers which rely on the fully initialized page->flags and others @@ -365,9 +352,7 @@ int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages, if (WARN_ON_ONCE(!params->pgprot.pgprot)) return -EINVAL; - err = check_hotplug_memory_addressable(pfn, nr_pages); - if (err) - return err; + VM_BUG_ON(!mhp_range_allowed(PFN_PHYS(pfn), nr_pages * PAGE_SIZE, false)); if (altmap) { /* @@ -1248,6 +1233,61 @@ out_unlock: } EXPORT_SYMBOL_GPL(add_memory_driver_managed); +/* + * Platforms should define arch_get_mappable_range() that provides + * maximum possible addressable physical memory range for which the + * linear mapping could be created. The platform returned address + * range must adhere to these following semantics. + * + * - range.start <= range.end + * - Range includes both end points [range.start..range.end] + * + * There is also a fallback definition provided here, allowing the + * entire possible physical address range in case any platform does + * not define arch_get_mappable_range(). 
+ */ +struct range __weak arch_get_mappable_range(void) +{ + struct range mhp_range = { + .start = 0UL, + .end = -1ULL, + }; + return mhp_range; +} + +struct range mhp_get_pluggable_range(bool need_mapping) +{ + const u64 max_phys = (1ULL << MAX_PHYSMEM_BITS) - 1; + struct range mhp_range; + + if (need_mapping) { + mhp_range = arch_get_mappable_range(); + if (mhp_range.start > max_phys) { + mhp_range.start = 0; + mhp_range.end = 0; + } + mhp_range.end = min_t(u64, mhp_range.end, max_phys); + } else { + mhp_range.start = 0; + mhp_range.end = max_phys; + } + return mhp_range; +} +EXPORT_SYMBOL_GPL(mhp_get_pluggable_range); + +bool mhp_range_allowed(u64 start, u64 size, bool need_mapping) +{ + struct range mhp_range = mhp_get_pluggable_range(need_mapping); + u64 end = start + size; + + if (start < end && start >= mhp_range.start && (end - 1) <= mhp_range.end) + return true; + + pr_warn("Hotplug memory [%#llx-%#llx] exceeds maximum addressable range [%#llx-%#llx]\n", + start, end, mhp_range.start, mhp_range.end); + return false; +} + #ifdef CONFIG_MEMORY_HOTREMOVE /* * Confirm all pages in a range [start, end) belong to the same zone (skipping diff --git a/mm/memremap.c b/mm/memremap.c index 2455bac89506..7aa7d6e80ee5 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -200,6 +200,7 @@ static void dev_pagemap_percpu_release(struct percpu_ref *ref) static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params, int range_id, int nid) { + const bool is_private = pgmap->type == MEMORY_DEVICE_PRIVATE; struct range *range = &pgmap->ranges[range_id]; struct dev_pagemap *conflict_pgmap; int error, is_ram; @@ -245,6 +246,11 @@ static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params, if (error) goto err_pfn_remap; + if (!mhp_range_allowed(range->start, range_len(range), !is_private)) { + error = -EINVAL; + goto err_pfn_remap; + } + mem_hotplug_begin(); /* @@ -258,7 +264,7 @@ static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params, * the CPU, we do want the linear mapping and thus use * arch_add_memory(). */ - if (pgmap->type == MEMORY_DEVICE_PRIVATE) { + if (is_private) { error = add_pages(nid, PHYS_PFN(range->start), PHYS_PFN(range_len(range)), params); } else { -- cgit v1.2.3-59-g8ed1b From 5d5d19eda6b0ee790af89c45e3f678345be6f50f Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Thu, 25 Feb 2021 17:18:09 -0800 Subject: mm/rmap: fix potential pte_unmap on an not mapped pte For PMD-mapped page (usually THP), pvmw->pte is NULL. For PTE-mapped THP, pvmw->pte is mapped. But for HugeTLB pages, pvmw->pte is not mapped and set to the relevant page table entry. So in page_vma_mapped_walk_done(), we may do pte_unmap() for HugeTLB pte which is not mapped. Fix this by checking pvmw->page against PageHuge before trying to do pte_unmap(). Link: https://lkml.kernel.org/r/20210127093349.39081-1-linmiaohe@huawei.com Fixes: ace71a19cec5 ("mm: introduce page_vma_mapped_walk()") Signed-off-by: Hongxiang Lou Signed-off-by: Miaohe Lin Tested-by: Sedat Dilek Cc: Kees Cook Cc: Nathan Chancellor Cc: Mike Kravetz Cc: Shakeel Butt Cc: Johannes Weiner Cc: Vlastimil Babka Cc: Michel Lespinasse Cc: Nick Desaulniers Cc: "Kirill A. 
Shutemov" Cc: Wei Yang Cc: Dmitry Safonov <0x7f454c46@gmail.com> Cc: Brian Geffon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/rmap.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 70085ca1a3fc..def5c62c93b3 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -213,7 +213,8 @@ struct page_vma_mapped_walk { static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw) { - if (pvmw->pte) + /* HugeTLB pte is set to the relevant page table entry without pte_mapped. */ + if (pvmw->pte && !PageHuge(pvmw->page)) pte_unmap(pvmw->pte); if (pvmw->ptl) spin_unlock(pvmw->ptl); -- cgit v1.2.3-59-g8ed1b From fc6697a89f56d9773b2fbff718d4cf2a6d63379d Mon Sep 17 00:00:00 2001 From: Tian Tao Date: Thu, 25 Feb 2021 17:18:17 -0800 Subject: mm/zswap: add the flag can_sleep_mapped Patch series "Fix the compatibility of zsmalloc and zswap". Patch #1 adds a flag to zpool, then zswap used to determine if zpool drivers such as zbud/z3fold/zsmalloc will enter an atomic context after mapping. The difference between zbud/z3fold and zsmalloc is that zsmalloc requires an atomic context that since its map function holds a preempt-disabled, but zbud/z3fold don't require an atomic context. So patch #2 sets flag sleep_mapped to true indicating that zbud/z3fold can sleep after mapping. zsmalloc didn't support sleep after mapping, so don't set that flag to true. This patch (of 2): Add a flag to zpool, named is "can_sleep_mapped", and have it set true for zbud/z3fold, not set this flag for zsmalloc, so its default value is false. Then zswap could go the current path if the flag is true; and if it's false, copy data from src to a temporary buffer, then unmap the handle, take the mutex, process the buffer instead of src to avoid sleeping function called from atomic context. [natechancellor@gmail.com: add return value in zswap_frontswap_load] Link: https://lkml.kernel.org/r/20210121214804.926843-1-natechancellor@gmail.com [tiantao6@hisilicon.com: fix potential memory leak] Link: https://lkml.kernel.org/r/1611538365-51811-1-git-send-email-tiantao6@hisilicon.com [colin.king@canonical.com: fix potential uninitialized pointer read on tmp] Link: https://lkml.kernel.org/r/20210128141728.639030-1-colin.king@canonical.com [tiantao6@hisilicon.com: fix variable 'entry' is uninitialized when used] Link: https://lkml.kernel.org/r/1611223030-58346-1-git-send-email-tiantao6@hisilicon.comLink: https://lkml.kernel.org/r/1611035683-12732-1-git-send-email-tiantao6@hisilicon.com Link: https://lkml.kernel.org/r/1611035683-12732-2-git-send-email-tiantao6@hisilicon.com Signed-off-by: Tian Tao Signed-off-by: Nathan Chancellor Signed-off-by: Colin Ian King Reviewed-by: Vitaly Wool Acked-by: Sebastian Andrzej Siewior Reported-by: Mike Galbraith Cc: Barry Song Cc: Dan Streetman Cc: Seth Jennings Cc: Dan Carpenter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/zpool.h | 3 +++ mm/zpool.c | 13 +++++++++++++ mm/zswap.c | 51 ++++++++++++++++++++++++++++++++++++++++++++++----- 3 files changed, 62 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/zpool.h b/include/linux/zpool.h index 51bf43076165..e8997010612a 100644 --- a/include/linux/zpool.h +++ b/include/linux/zpool.h @@ -73,6 +73,7 @@ u64 zpool_get_total_size(struct zpool *pool); * @malloc: allocate mem from a pool. * @free: free mem from a pool. * @shrink: shrink the pool. 
+ * @sleep_mapped: whether zpool driver can sleep during map. * @map: map a handle. * @unmap: unmap a handle. * @total_size: get total size of a pool. @@ -100,6 +101,7 @@ struct zpool_driver { int (*shrink)(void *pool, unsigned int pages, unsigned int *reclaimed); + bool sleep_mapped; void *(*map)(void *pool, unsigned long handle, enum zpool_mapmode mm); void (*unmap)(void *pool, unsigned long handle); @@ -112,5 +114,6 @@ void zpool_register_driver(struct zpool_driver *driver); int zpool_unregister_driver(struct zpool_driver *driver); bool zpool_evictable(struct zpool *pool); +bool zpool_can_sleep_mapped(struct zpool *pool); #endif diff --git a/mm/zpool.c b/mm/zpool.c index 3744a2d1a624..5ed71207ced7 100644 --- a/mm/zpool.c +++ b/mm/zpool.c @@ -23,6 +23,7 @@ struct zpool { void *pool; const struct zpool_ops *ops; bool evictable; + bool can_sleep_mapped; struct list_head list; }; @@ -183,6 +184,7 @@ struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp, zpool->pool = driver->create(name, gfp, ops, zpool); zpool->ops = ops; zpool->evictable = driver->shrink && ops && ops->evict; + zpool->can_sleep_mapped = driver->sleep_mapped; if (!zpool->pool) { pr_err("couldn't create %s pool\n", type); @@ -393,6 +395,17 @@ bool zpool_evictable(struct zpool *zpool) return zpool->evictable; } +/** + * zpool_can_sleep_mapped - Test if zpool can sleep when do mapped. + * @zpool: The zpool to test + * + * Returns: true if zpool can sleep; false otherwise. + */ +bool zpool_can_sleep_mapped(struct zpool *zpool) +{ + return zpool->can_sleep_mapped; +} + MODULE_LICENSE("GPL"); MODULE_AUTHOR("Dan Streetman "); MODULE_DESCRIPTION("Common API for compressed memory storage"); diff --git a/mm/zswap.c b/mm/zswap.c index 1e41c2857068..578d9f256920 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -935,13 +935,19 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle) struct scatterlist input, output; struct crypto_acomp_ctx *acomp_ctx; - u8 *src; + u8 *src, *tmp = NULL; unsigned int dlen; int ret; struct writeback_control wbc = { .sync_mode = WB_SYNC_NONE, }; + if (!zpool_can_sleep_mapped(pool)) { + tmp = kmalloc(PAGE_SIZE, GFP_ATOMIC); + if (!tmp) + return -ENOMEM; + } + /* extract swpentry from data */ zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO); swpentry = zhdr->swpentry; /* here */ @@ -955,6 +961,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle) /* entry was invalidated */ spin_unlock(&tree->lock); zpool_unmap_handle(pool, handle); + kfree(tmp); return 0; } spin_unlock(&tree->lock); @@ -979,6 +986,14 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle) dlen = PAGE_SIZE; src = (u8 *)zhdr + sizeof(struct zswap_header); + if (!zpool_can_sleep_mapped(pool)) { + + memcpy(tmp, src, entry->length); + src = tmp; + + zpool_unmap_handle(pool, handle); + } + mutex_lock(acomp_ctx->mutex); sg_init_one(&input, src, entry->length); sg_init_table(&output, 1); @@ -1033,7 +1048,11 @@ fail: spin_unlock(&tree->lock); end: - zpool_unmap_handle(pool, handle); + if (zpool_can_sleep_mapped(pool)) + zpool_unmap_handle(pool, handle); + else + kfree(tmp); + return ret; } @@ -1235,7 +1254,7 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset, struct zswap_entry *entry; struct scatterlist input, output; struct crypto_acomp_ctx *acomp_ctx; - u8 *src, *dst; + u8 *src, *dst, *tmp; unsigned int dlen; int ret; @@ -1253,15 +1272,33 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset, dst = kmap_atomic(page); 
zswap_fill_page(dst, entry->value); kunmap_atomic(dst); + ret = 0; goto freeentry; } + if (!zpool_can_sleep_mapped(entry->pool->zpool)) { + + tmp = kmalloc(entry->length, GFP_ATOMIC); + if (!tmp) { + ret = -ENOMEM; + goto freeentry; + } + } + /* decompress */ dlen = PAGE_SIZE; src = zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO); if (zpool_evictable(entry->pool->zpool)) src += sizeof(struct zswap_header); + if (!zpool_can_sleep_mapped(entry->pool->zpool)) { + + memcpy(tmp, src, entry->length); + src = tmp; + + zpool_unmap_handle(entry->pool->zpool, entry->handle); + } + acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx); mutex_lock(acomp_ctx->mutex); sg_init_one(&input, src, entry->length); @@ -1271,7 +1308,11 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset, ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait); mutex_unlock(acomp_ctx->mutex); - zpool_unmap_handle(entry->pool->zpool, entry->handle); + if (zpool_can_sleep_mapped(entry->pool->zpool)) + zpool_unmap_handle(entry->pool->zpool, entry->handle); + else + kfree(tmp); + BUG_ON(ret); freeentry: @@ -1279,7 +1320,7 @@ freeentry: zswap_entry_put(tree, entry); spin_unlock(&tree->lock); - return 0; + return ret; } /* frees an entry in zswap */ -- cgit v1.2.3-59-g8ed1b From 2395928158059b8f9858365fce7713ce7fef62e4 Mon Sep 17 00:00:00 2001 From: Rokudo Yan Date: Thu, 25 Feb 2021 17:18:31 -0800 Subject: zsmalloc: account the number of compacted pages correctly There exist multiple paths that may do zram compaction concurrently: 1. auto-compaction triggered during memory reclaim 2. userspace utils writing the zram/compaction node So, multiple threads may call zs_shrinker_scan/zs_compact concurrently. But pages_compacted is a per-zsmalloc-pool variable, and modification of the variable is not serialized (even though it happens under class->lock). There are two issues here: 1. pages_compacted may not equal the total number of pages freed (due to concurrent additions). 2. zs_shrinker_scan may not return the correct number of pages freed (by the current shrinker invocation). The fix is simple: 1. account the number of pages freed in zs_compact locally. 2. use the atomic variable pages_compacted to accumulate the total.
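In miniature, the intended accounting pattern looks like the sketch below (an illustration only, with a made-up function name; the real hunks for __zs_compact()/zs_compact()/zs_shrinker_scan() follow): each call counts the pages it freed locally, folds that count into a shared atomic total, and reports only its own work.

#include <linux/atomic.h>

static atomic_long_t total_pages_compacted = ATOMIC_LONG_INIT(0);  /* pool-wide stat */

static unsigned long compact_once(void)
{
        unsigned long freed = 0;

        /* ... walk size classes, freeing zspages and bumping 'freed' locally ... */
        atomic_long_add(freed, &total_pages_compacted);
        return freed;   /* a shrinker returns only what this call freed */
}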
Link: https://lkml.kernel.org/r/20210202122235.26885-1-wu-yan@tcl.com Fixes: 860c707dca155a56 ("zsmalloc: account the number of compacted pages") Signed-off-by: Rokudo Yan Cc: Minchan Kim Cc: Sergey Senozhatsky Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/block/zram/zram_drv.c | 2 +- include/linux/zsmalloc.h | 2 +- mm/zsmalloc.c | 17 +++++++++++------ 3 files changed, 13 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index d7018543842e..a711a2e2a794 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1081,7 +1081,7 @@ static ssize_t mm_stat_show(struct device *dev, zram->limit_pages << PAGE_SHIFT, max_used << PAGE_SHIFT, (u64)atomic64_read(&zram->stats.same_pages), - pool_stats.pages_compacted, + atomic_long_read(&pool_stats.pages_compacted), (u64)atomic64_read(&zram->stats.huge_pages), (u64)atomic64_read(&zram->stats.huge_pages_since)); up_read(&zram->init_lock); diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index 4807ca4d52e0..2a430e713ce5 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h @@ -35,7 +35,7 @@ enum zs_mapmode { struct zs_pool_stats { /* How many pages were migrated (freed) */ - unsigned long pages_compacted; + atomic_long_t pages_compacted; }; struct zs_pool; diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index cf0ed0e4e911..1518732f95c3 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -2212,11 +2212,13 @@ static unsigned long zs_can_compact(struct size_class *class) return obj_wasted * class->pages_per_zspage; } -static void __zs_compact(struct zs_pool *pool, struct size_class *class) +static unsigned long __zs_compact(struct zs_pool *pool, + struct size_class *class) { struct zs_compact_control cc; struct zspage *src_zspage; struct zspage *dst_zspage = NULL; + unsigned long pages_freed = 0; spin_lock(&class->lock); while ((src_zspage = isolate_zspage(class, true))) { @@ -2246,7 +2248,7 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class) putback_zspage(class, dst_zspage); if (putback_zspage(class, src_zspage) == ZS_EMPTY) { free_zspage(pool, class, src_zspage); - pool->stats.pages_compacted += class->pages_per_zspage; + pages_freed += class->pages_per_zspage; } spin_unlock(&class->lock); cond_resched(); @@ -2257,12 +2259,15 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class) putback_zspage(class, src_zspage); spin_unlock(&class->lock); + + return pages_freed; } unsigned long zs_compact(struct zs_pool *pool) { int i; struct size_class *class; + unsigned long pages_freed = 0; for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) { class = pool->size_class[i]; @@ -2270,10 +2275,11 @@ unsigned long zs_compact(struct zs_pool *pool) continue; if (class->index != i) continue; - __zs_compact(pool, class); + pages_freed += __zs_compact(pool, class); } + atomic_long_add(pages_freed, &pool->stats.pages_compacted); - return pool->stats.pages_compacted; + return pages_freed; } EXPORT_SYMBOL_GPL(zs_compact); @@ -2290,13 +2296,12 @@ static unsigned long zs_shrinker_scan(struct shrinker *shrinker, struct zs_pool *pool = container_of(shrinker, struct zs_pool, shrinker); - pages_freed = pool->stats.pages_compacted; /* * Compact classes and calculate compaction delta. * Can run concurrently with a manually triggered * (by user) compaction. */ - pages_freed = zs_compact(pool) - pages_freed; + pages_freed = zs_compact(pool); return pages_freed ? 
pages_freed : SHRINK_STOP; } -- cgit v1.2.3-59-g8ed1b From 4be408cec257d1156d35647db57726f5ef977630 Mon Sep 17 00:00:00 2001 From: Guo Ren Date: Thu, 25 Feb 2021 17:18:38 -0800 Subject: mm: page-flags.h: Typo fix (It -> If) The "If" was wrongly spelled as "It". Link: https://lkml.kernel.org/r/1608959036-91409-1-git-send-email-guoren@kernel.org Signed-off-by: Guo Ren Cc: Oscar Salvador Cc: Alexander Duyck Cc: David Hildenbrand Cc: Steven Price Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page-flags.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index db914477057b..04a34c08e0a6 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -810,7 +810,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page) /* * Flags checked when a page is freed. Pages being freed should not have - * these flags set. It they are, there is a problem. + * these flags set. If they are, there is a problem. */ #define PAGE_FLAGS_CHECK_AT_FREE \ (1UL << PG_lru | 1UL << PG_locked | \ @@ -821,7 +821,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page) /* * Flags checked when a page is prepped for return by the page allocator. - * Pages being prepped should not have these flags set. It they are set, + * Pages being prepped should not have these flags set. If they are set, * there has been a kernel bug or struct page corruption. * * __PG_HWPOISON is exceptional because it needs to be kept beyond page's -- cgit v1.2.3-59-g8ed1b From 0ce20dd840897b12ae70869c69f1ba34d6d16965 Mon Sep 17 00:00:00 2001 From: Alexander Potapenko Date: Thu, 25 Feb 2021 17:18:53 -0800 Subject: mm: add Kernel Electric-Fence infrastructure Patch series "KFENCE: A low-overhead sampling-based memory safety error detector", v7. This adds the Kernel Electric-Fence (KFENCE) infrastructure. KFENCE is a low-overhead sampling-based memory safety error detector of heap use-after-free, invalid-free, and out-of-bounds access errors. This series enables KFENCE for the x86 and arm64 architectures, and adds KFENCE hooks to the SLAB and SLUB allocators. KFENCE is designed to be enabled in production kernels, and has near zero performance overhead. Compared to KASAN, KFENCE trades performance for precision. The main motivation behind KFENCE's design, is that with enough total uptime KFENCE will detect bugs in code paths not typically exercised by non-production test workloads. One way to quickly achieve a large enough total uptime is when the tool is deployed across a large fleet of machines. KFENCE objects each reside on a dedicated page, at either the left or right page boundaries. The pages to the left and right of the object page are "guard pages", whose attributes are changed to a protected state, and cause page faults on any attempted access to them. Such page faults are then intercepted by KFENCE, which handles the fault gracefully by reporting a memory access error. Guarded allocations are set up based on a sample interval (can be set via kfence.sample_interval). After expiration of the sample interval, the next allocation through the main allocator (SLAB or SLUB) returns a guarded allocation from the KFENCE object pool. At this point, the timer is reset, and the next allocation is set up after the expiration of the interval. 
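Conceptually, the sampling gate can be pictured as in the simplified sketch below (illustration only; the in-tree toggle_allocation_gate() in mm/kfence/core.c, added later in this patch, also flips a static key and waits for the sampled allocation before re-arming):

#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static atomic_t kfence_allocation_gate = ATOMIC_INIT(1);    /* 1 == gate closed */
static unsigned long kfence_sample_interval = 100;          /* ms, cf. kfence.sample_interval */

static void toggle_allocation_gate(struct work_struct *work);
static DECLARE_DELAYED_WORK(kfence_timer, toggle_allocation_gate);

static void toggle_allocation_gate(struct work_struct *work)
{
        /* Open the gate: the next heap allocation may be redirected to KFENCE. */
        atomic_set(&kfence_allocation_gate, 0);
        /* Re-arm; in the real code this happens only after the gate has been consumed. */
        schedule_delayed_work(&kfence_timer, msecs_to_jiffies(kfence_sample_interval));
}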
To enable/disable a KFENCE allocation through the main allocator's fast-path without overhead, KFENCE relies on static branches via the static keys infrastructure. The static branch is toggled to redirect the allocation to KFENCE. The KFENCE memory pool is of fixed size, and if the pool is exhausted no further KFENCE allocations occur. The default config is conservative with only 255 objects, resulting in a pool size of 2 MiB (with 4 KiB pages). We have verified by running synthetic benchmarks (sysbench I/O, hackbench) and production server-workload benchmarks that a kernel with KFENCE (using sample intervals 100-500ms) is performance-neutral compared to a non-KFENCE baseline kernel. KFENCE is inspired by GWP-ASan [1], a userspace tool with similar properties. The name "KFENCE" is a homage to the Electric Fence Malloc Debugger [2]. For more details, see Documentation/dev-tools/kfence.rst added in the series -- also viewable here: https://raw.githubusercontent.com/google/kasan/kfence/Documentation/dev-tools/kfence.rst [1] http://llvm.org/docs/GwpAsan.html [2] https://linux.die.net/man/3/efence This patch (of 9): This adds the Kernel Electric-Fence (KFENCE) infrastructure. KFENCE is a low-overhead sampling-based memory safety error detector of heap use-after-free, invalid-free, and out-of-bounds access errors. KFENCE is designed to be enabled in production kernels, and has near zero performance overhead. Compared to KASAN, KFENCE trades performance for precision. The main motivation behind KFENCE's design, is that with enough total uptime KFENCE will detect bugs in code paths not typically exercised by non-production test workloads. One way to quickly achieve a large enough total uptime is when the tool is deployed across a large fleet of machines. KFENCE objects each reside on a dedicated page, at either the left or right page boundaries. The pages to the left and right of the object page are "guard pages", whose attributes are changed to a protected state, and cause page faults on any attempted access to them. Such page faults are then intercepted by KFENCE, which handles the fault gracefully by reporting a memory access error. To detect out-of-bounds writes to memory within the object's page itself, KFENCE also uses pattern-based redzones. The following figure illustrates the page layout: ---+-----------+-----------+-----------+-----------+-----------+--- | xxxxxxxxx | O : | xxxxxxxxx | : O | xxxxxxxxx | | xxxxxxxxx | B : | xxxxxxxxx | : B | xxxxxxxxx | | x GUARD x | J : RED- | x GUARD x | RED- : J | x GUARD x | | xxxxxxxxx | E : ZONE | xxxxxxxxx | ZONE : E | xxxxxxxxx | | xxxxxxxxx | C : | xxxxxxxxx | : C | xxxxxxxxx | | xxxxxxxxx | T : | xxxxxxxxx | : T | xxxxxxxxx | ---+-----------+-----------+-----------+-----------+-----------+--- Guarded allocations are set up based on a sample interval (can be set via kfence.sample_interval). After expiration of the sample interval, a guarded allocation from the KFENCE object pool is returned to the main allocator (SLAB or SLUB). At this point, the timer is reset, and the next allocation is set up after the expiration of the interval. To enable/disable a KFENCE allocation through the main allocator's fast-path without overhead, KFENCE relies on static branches via the static keys infrastructure. The static branch is toggled to redirect the allocation to KFENCE. To date, we have verified by running synthetic benchmarks (sysbench I/O, hackbench) that a kernel compiled with KFENCE is performance-neutral compared to the non-KFENCE baseline. 
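Given the kfence_alloc()/kfence_free() interface added in include/linux/kfence.h below, an allocator hook site could look roughly like this (a hypothetical sketch; the actual SLAB/SLUB hooks land in later patches of the series, and __regular_slab_alloc()/__regular_slab_free() are placeholder names for the normal paths):

#include <linux/kfence.h>
#include <linux/slab.h>

/* Placeholders standing in for the allocator's regular fast paths. */
void *__regular_slab_alloc(struct kmem_cache *s, size_t size, gfp_t flags);
void __regular_slab_free(void *obj);

static void *slab_alloc_hooked(struct kmem_cache *s, size_t size, gfp_t flags)
{
        void *obj = kfence_alloc(s, size, flags);  /* almost always NULL (static branch) */

        if (obj)
                return obj;     /* sampled: a guarded KFENCE object */
        return __regular_slab_alloc(s, size, flags);
}

static void slab_free_hooked(void *obj)
{
        if (kfence_free(obj))   /* true only for KFENCE pool addresses */
                return;
        __regular_slab_free(obj);
}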
For more details, see Documentation/dev-tools/kfence.rst (added later in the series). [elver@google.com: fix parameter description for kfence_object_start()] Link: https://lkml.kernel.org/r/20201106092149.GA2851373@elver.google.com [elver@google.com: avoid stalling work queue task without allocations] Link: https://lkml.kernel.org/r/CADYN=9J0DQhizAGB0-jz4HOBBh+05kMBXb4c0cXMS7Qi5NAJiw@mail.gmail.com Link: https://lkml.kernel.org/r/20201110135320.3309507-1-elver@google.com [elver@google.com: fix potential deadlock due to wake_up()] Link: https://lkml.kernel.org/r/000000000000c0645805b7f982e4@google.com Link: https://lkml.kernel.org/r/20210104130749.1768991-1-elver@google.com [elver@google.com: add option to use KFENCE without static keys] Link: https://lkml.kernel.org/r/20210111091544.3287013-1-elver@google.com [elver@google.com: add missing copyright and description headers] Link: https://lkml.kernel.org/r/20210118092159.145934-1-elver@google.com Link: https://lkml.kernel.org/r/20201103175841.3495947-2-elver@google.com Signed-off-by: Marco Elver Signed-off-by: Alexander Potapenko Reviewed-by: Dmitry Vyukov Reviewed-by: SeongJae Park Co-developed-by: Marco Elver Reviewed-by: Jann Horn Cc: "H. Peter Anvin" Cc: Paul E. McKenney Cc: Andrey Konovalov Cc: Andrey Ryabinin Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Catalin Marinas Cc: Christopher Lameter Cc: Dave Hansen Cc: David Rientjes Cc: Eric Dumazet Cc: Greg Kroah-Hartman Cc: Hillf Danton Cc: Ingo Molnar Cc: Jonathan Corbet Cc: Joonsoo Kim Cc: Joern Engel Cc: Kees Cook Cc: Mark Rutland Cc: Pekka Enberg Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Vlastimil Babka Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kfence.h | 216 +++++++++++++ init/main.c | 3 + lib/Kconfig.debug | 1 + lib/Kconfig.kfence | 67 ++++ mm/Makefile | 1 + mm/kfence/Makefile | 3 + mm/kfence/core.c | 840 +++++++++++++++++++++++++++++++++++++++++++++++++ mm/kfence/kfence.h | 113 +++++++ mm/kfence/report.c | 240 ++++++++++++++ 9 files changed, 1484 insertions(+) create mode 100644 include/linux/kfence.h create mode 100644 lib/Kconfig.kfence create mode 100644 mm/kfence/Makefile create mode 100644 mm/kfence/core.c create mode 100644 mm/kfence/kfence.h create mode 100644 mm/kfence/report.c (limited to 'include/linux') diff --git a/include/linux/kfence.h b/include/linux/kfence.h new file mode 100644 index 000000000000..81f3911cb298 --- /dev/null +++ b/include/linux/kfence.h @@ -0,0 +1,216 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Kernel Electric-Fence (KFENCE). Public interface for allocator and fault + * handler integration. For more info see Documentation/dev-tools/kfence.rst. + * + * Copyright (C) 2020, Google LLC. + */ + +#ifndef _LINUX_KFENCE_H +#define _LINUX_KFENCE_H + +#include +#include + +#ifdef CONFIG_KFENCE + +/* + * We allocate an even number of pages, as it simplifies calculations to map + * address to metadata indices; effectively, the very first page serves as an + * extended guard page, but otherwise has no special purpose. + */ +#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE) +extern char *__kfence_pool; + +#ifdef CONFIG_KFENCE_STATIC_KEYS +#include +DECLARE_STATIC_KEY_FALSE(kfence_allocation_key); +#else +#include +extern atomic_t kfence_allocation_gate; +#endif + +/** + * is_kfence_address() - check if an address belongs to KFENCE pool + * @addr: address to check + * + * Return: true or false depending on whether the address is within the KFENCE + * object range. 
+ * + * KFENCE objects live in a separate page range and are not to be intermixed + * with regular heap objects (e.g. KFENCE objects must never be added to the + * allocator freelists). Failing to do so may and will result in heap + * corruptions, therefore is_kfence_address() must be used to check whether + * an object requires specific handling. + * + * Note: This function may be used in fast-paths, and is performance critical. + * Future changes should take this into account; for instance, we want to avoid + * introducing another load and therefore need to keep KFENCE_POOL_SIZE a + * constant (until immediate patching support is added to the kernel). + */ +static __always_inline bool is_kfence_address(const void *addr) +{ + /* + * The non-NULL check is required in case the __kfence_pool pointer was + * never initialized; keep it in the slow-path after the range-check. + */ + return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && addr); +} + +/** + * kfence_alloc_pool() - allocate the KFENCE pool via memblock + */ +void __init kfence_alloc_pool(void); + +/** + * kfence_init() - perform KFENCE initialization at boot time + * + * Requires that kfence_alloc_pool() was called before. This sets up the + * allocation gate timer, and requires that workqueues are available. + */ +void __init kfence_init(void); + +/** + * kfence_shutdown_cache() - handle shutdown_cache() for KFENCE objects + * @s: cache being shut down + * + * Before shutting down a cache, one must ensure there are no remaining objects + * allocated from it. Because KFENCE objects are not referenced from the cache + * directly, we need to check them here. + * + * Note that shutdown_cache() is internal to SL*B, and kmem_cache_destroy() does + * not return if allocated objects still exist: it prints an error message and + * simply aborts destruction of a cache, leaking memory. + * + * If the only such objects are KFENCE objects, we will not leak the entire + * cache, but instead try to provide more useful debug info by making allocated + * objects "zombie allocations". Objects may then still be used or freed (which + * is handled gracefully), but usage will result in showing KFENCE error reports + * which include stack traces to the user of the object, the original allocation + * site, and caller to shutdown_cache(). + */ +void kfence_shutdown_cache(struct kmem_cache *s); + +/* + * Allocate a KFENCE object. Allocators must not call this function directly, + * use kfence_alloc() instead. + */ +void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags); + +/** + * kfence_alloc() - allocate a KFENCE object with a low probability + * @s: struct kmem_cache with object requirements + * @size: exact size of the object to allocate (can be less than @s->size + * e.g. for kmalloc caches) + * @flags: GFP flags + * + * Return: + * * NULL - must proceed with allocating as usual, + * * non-NULL - pointer to a KFENCE object. + * + * kfence_alloc() should be inserted into the heap allocation fast path, + * allowing it to transparently return KFENCE-allocated objects with a low + * probability using a static branch (the probability is controlled by the + * kfence.sample_interval boot parameter). 
+ */ +static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) +{ +#ifdef CONFIG_KFENCE_STATIC_KEYS + if (static_branch_unlikely(&kfence_allocation_key)) +#else + if (unlikely(!atomic_read(&kfence_allocation_gate))) +#endif + return __kfence_alloc(s, size, flags); + return NULL; +} + +/** + * kfence_ksize() - get actual amount of memory allocated for a KFENCE object + * @addr: pointer to a heap object + * + * Return: + * * 0 - not a KFENCE object, must call __ksize() instead, + * * non-0 - this many bytes can be accessed without causing a memory error. + * + * kfence_ksize() returns the number of bytes requested for a KFENCE object at + * allocation time. This number may be less than the object size of the + * corresponding struct kmem_cache. + */ +size_t kfence_ksize(const void *addr); + +/** + * kfence_object_start() - find the beginning of a KFENCE object + * @addr: address within a KFENCE-allocated object + * + * Return: address of the beginning of the object. + * + * SL[AU]B-allocated objects are laid out within a page one by one, so it is + * easy to calculate the beginning of an object given a pointer inside it and + * the object size. The same is not true for KFENCE, which places a single + * object at either end of the page. This helper function is used to find the + * beginning of a KFENCE-allocated object. + */ +void *kfence_object_start(const void *addr); + +/** + * __kfence_free() - release a KFENCE heap object to KFENCE pool + * @addr: object to be freed + * + * Requires: is_kfence_address(addr) + * + * Release a KFENCE object and mark it as freed. + */ +void __kfence_free(void *addr); + +/** + * kfence_free() - try to release an arbitrary heap object to KFENCE pool + * @addr: object to be freed + * + * Return: + * * false - object doesn't belong to KFENCE pool and was ignored, + * * true - object was released to KFENCE pool. + * + * Release a KFENCE object and mark it as freed. May be called on any object, + * even non-KFENCE objects, to simplify integration of the hooks into the + * allocator's free codepath. The allocator must check the return value to + * determine if it was a KFENCE object or not. + */ +static __always_inline __must_check bool kfence_free(void *addr) +{ + if (!is_kfence_address(addr)) + return false; + __kfence_free(addr); + return true; +} + +/** + * kfence_handle_page_fault() - perform page fault handling for KFENCE pages + * @addr: faulting address + * + * Return: + * * false - address outside KFENCE pool, + * * true - page fault handled by KFENCE, no additional handling required. + * + * A page fault inside KFENCE pool indicates a memory error, such as an + * out-of-bounds access, a use-after-free or an invalid memory access. In these + * cases KFENCE prints an error message and marks the offending page as + * present, so that the kernel can proceed. 
+ */ +bool __must_check kfence_handle_page_fault(unsigned long addr); + +#else /* CONFIG_KFENCE */ + +static inline bool is_kfence_address(const void *addr) { return false; } +static inline void kfence_alloc_pool(void) { } +static inline void kfence_init(void) { } +static inline void kfence_shutdown_cache(struct kmem_cache *s) { } +static inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) { return NULL; } +static inline size_t kfence_ksize(const void *addr) { return 0; } +static inline void *kfence_object_start(const void *addr) { return NULL; } +static inline void __kfence_free(void *addr) { } +static inline bool __must_check kfence_free(void *addr) { return false; } +static inline bool __must_check kfence_handle_page_fault(unsigned long addr) { return false; } + +#endif + +#endif /* _LINUX_KFENCE_H */ diff --git a/init/main.c b/init/main.c index e9933cbf60d4..261051070e3c 100644 --- a/init/main.c +++ b/init/main.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include #include @@ -824,6 +825,7 @@ static void __init mm_init(void) */ page_ext_init_flatmem(); init_mem_debugging_and_hardening(); + kfence_alloc_pool(); report_meminit(); mem_init(); /* page_owner must be initialized after buddy is ready */ @@ -955,6 +957,7 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void) hrtimers_init(); softirq_init(); timekeeping_init(); + kfence_init(); /* * For best initial stack canary entropy, prepare it after: diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index f9febffffc21..2779c29d9981 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -938,6 +938,7 @@ config DEBUG_STACKOVERFLOW If in doubt, say "N". source "lib/Kconfig.kasan" +source "lib/Kconfig.kfence" endmenu # "Memory Debugging" diff --git a/lib/Kconfig.kfence b/lib/Kconfig.kfence new file mode 100644 index 000000000000..b88ac9d6b2e6 --- /dev/null +++ b/lib/Kconfig.kfence @@ -0,0 +1,67 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config HAVE_ARCH_KFENCE + bool + +menuconfig KFENCE + bool "KFENCE: low-overhead sampling-based memory safety error detector" + depends on HAVE_ARCH_KFENCE && !KASAN && (SLAB || SLUB) + select STACKTRACE + help + KFENCE is a low-overhead sampling-based detector of heap out-of-bounds + access, use-after-free, and invalid-free errors. KFENCE is designed + to have negligible cost to permit enabling it in production + environments. + + Note that, KFENCE is not a substitute for explicit testing with tools + such as KASAN. KFENCE can detect a subset of bugs that KASAN can + detect, albeit at very different performance profiles. If you can + afford to use KASAN, continue using KASAN, for example in test + environments. If your kernel targets production use, and cannot + enable KASAN due to its cost, consider using KFENCE. + +if KFENCE + +config KFENCE_STATIC_KEYS + bool "Use static keys to set up allocations" + default y + depends on JUMP_LABEL # To ensure performance, require jump labels + help + Use static keys (static branches) to set up KFENCE allocations. Using + static keys is normally recommended, because it avoids a dynamic + branch in the allocator's fast path. However, with very low sample + intervals, or on systems that do not support jump labels, a dynamic + branch may still be an acceptable performance trade-off. + +config KFENCE_SAMPLE_INTERVAL + int "Default sample interval in milliseconds" + default 100 + help + The KFENCE sample interval determines the frequency with which heap + allocations will be guarded by KFENCE. 
May be overridden via boot + parameter "kfence.sample_interval". + + Set this to 0 to disable KFENCE by default, in which case only + setting "kfence.sample_interval" to a non-zero value enables KFENCE. + +config KFENCE_NUM_OBJECTS + int "Number of guarded objects available" + range 1 65535 + default 255 + help + The number of guarded objects available. For each KFENCE object, 2 + pages are required; with one containing the object and two adjacent + ones used as guard pages. + +config KFENCE_STRESS_TEST_FAULTS + int "Stress testing of fault handling and error reporting" if EXPERT + default 0 + help + The inverse probability with which to randomly protect KFENCE object + pages, resulting in spurious use-after-frees. The main purpose of + this option is to stress test KFENCE with concurrent error reports + and allocations/frees. A value of 0 disables stress testing logic. + + Only for KFENCE testing; set to 0 if you are not a KFENCE developer. + +endif # KFENCE diff --git a/mm/Makefile b/mm/Makefile index 135bbb65511a..72227b24a616 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -81,6 +81,7 @@ obj-$(CONFIG_PAGE_POISONING) += page_poison.o obj-$(CONFIG_SLAB) += slab.o obj-$(CONFIG_SLUB) += slub.o obj-$(CONFIG_KASAN) += kasan/ +obj-$(CONFIG_KFENCE) += kfence/ obj-$(CONFIG_FAILSLAB) += failslab.o obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o obj-$(CONFIG_MEMTEST) += memtest.o diff --git a/mm/kfence/Makefile b/mm/kfence/Makefile new file mode 100644 index 000000000000..d991e9a349f0 --- /dev/null +++ b/mm/kfence/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_KFENCE) := core.o report.o diff --git a/mm/kfence/core.c b/mm/kfence/core.c new file mode 100644 index 000000000000..d6a32c13336b --- /dev/null +++ b/mm/kfence/core.c @@ -0,0 +1,840 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * KFENCE guarded object allocator and fault handling. + * + * Copyright (C) 2020, Google LLC. + */ + +#define pr_fmt(fmt) "kfence: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "kfence.h" + +/* Disables KFENCE on the first warning assuming an irrecoverable error. */ +#define KFENCE_WARN_ON(cond) \ + ({ \ + const bool __cond = WARN_ON(cond); \ + if (unlikely(__cond)) \ + WRITE_ONCE(kfence_enabled, false); \ + __cond; \ + }) + +/* === Data ================================================================= */ + +static bool kfence_enabled __read_mostly; + +static unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL; + +#ifdef MODULE_PARAM_PREFIX +#undef MODULE_PARAM_PREFIX +#endif +#define MODULE_PARAM_PREFIX "kfence." + +static int param_set_sample_interval(const char *val, const struct kernel_param *kp) +{ + unsigned long num; + int ret = kstrtoul(val, 0, &num); + + if (ret < 0) + return ret; + + if (!num) /* Using 0 to indicate KFENCE is disabled. */ + WRITE_ONCE(kfence_enabled, false); + else if (!READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING) + return -EINVAL; /* Cannot (re-)enable KFENCE on-the-fly. 
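
For a sense of scale with the defaults above (255 objects), the reserved pool works out to roughly 2 MiB with 4 KiB pages: kfence_init_pool() below lays the pool out as two leading guard pages followed by one data page plus one guard page per object. A minimal sketch of that arithmetic (the page size is an assumption of the example):

/* Pool size for the Kconfig defaults above (sketch; assumes 4 KiB pages). */
#include <stdio.h>

#define PAGE_SIZE               4096UL
#define KFENCE_NUM_OBJECTS      255UL   /* CONFIG_KFENCE_NUM_OBJECTS default */

int main(void)
{
        /* Two leading guard pages, then a data page and a guard page per object. */
        unsigned long pages = 2 + 2 * KFENCE_NUM_OBJECTS;
        unsigned long bytes = pages * PAGE_SIZE;

        printf("%lu pages, %lu bytes (%lu KiB)\n", pages, bytes, bytes >> 10);
        /* Prints: 512 pages, 2097152 bytes (2048 KiB) */
        return 0;
}
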
*/ + + *((unsigned long *)kp->arg) = num; + return 0; +} + +static int param_get_sample_interval(char *buffer, const struct kernel_param *kp) +{ + if (!READ_ONCE(kfence_enabled)) + return sprintf(buffer, "0\n"); + + return param_get_ulong(buffer, kp); +} + +static const struct kernel_param_ops sample_interval_param_ops = { + .set = param_set_sample_interval, + .get = param_get_sample_interval, +}; +module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600); + +/* The pool of pages used for guard pages and objects. */ +char *__kfence_pool __ro_after_init; +EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */ + +/* + * Per-object metadata, with one-to-one mapping of object metadata to + * backing pages (in __kfence_pool). + */ +static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0); +struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS]; + +/* Freelist with available objects. */ +static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist); +static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */ + +#ifdef CONFIG_KFENCE_STATIC_KEYS +/* The static key to set up a KFENCE allocation. */ +DEFINE_STATIC_KEY_FALSE(kfence_allocation_key); +#endif + +/* Gates the allocation, ensuring only one succeeds in a given period. */ +atomic_t kfence_allocation_gate = ATOMIC_INIT(1); + +/* Statistics counters for debugfs. */ +enum kfence_counter_id { + KFENCE_COUNTER_ALLOCATED, + KFENCE_COUNTER_ALLOCS, + KFENCE_COUNTER_FREES, + KFENCE_COUNTER_ZOMBIES, + KFENCE_COUNTER_BUGS, + KFENCE_COUNTER_COUNT, +}; +static atomic_long_t counters[KFENCE_COUNTER_COUNT]; +static const char *const counter_names[] = { + [KFENCE_COUNTER_ALLOCATED] = "currently allocated", + [KFENCE_COUNTER_ALLOCS] = "total allocations", + [KFENCE_COUNTER_FREES] = "total frees", + [KFENCE_COUNTER_ZOMBIES] = "zombie allocations", + [KFENCE_COUNTER_BUGS] = "total bugs", +}; +static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT); + +/* === Internals ============================================================ */ + +static bool kfence_protect(unsigned long addr) +{ + return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true)); +} + +static bool kfence_unprotect(unsigned long addr) +{ + return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false)); +} + +static inline struct kfence_metadata *addr_to_metadata(unsigned long addr) +{ + long index; + + /* The checks do not affect performance; only called from slow-paths. */ + + if (!is_kfence_address((void *)addr)) + return NULL; + + /* + * May be an invalid index if called with an address at the edge of + * __kfence_pool, in which case we would report an "invalid access" + * error. + */ + index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1; + if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS) + return NULL; + + return &kfence_metadata[index]; +} + +static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta) +{ + unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2; + unsigned long pageaddr = (unsigned long)&__kfence_pool[offset]; + + /* The checks do not affect performance; only called from slow-paths. */ + + /* Only call with a pointer into kfence_metadata. */ + if (KFENCE_WARN_ON(meta < kfence_metadata || + meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS)) + return 0; + + /* + * This metadata object only ever maps to 1 page; verify that the stored + * address is in the expected range. 
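
The index arithmetic in addr_to_metadata() and metadata_to_pageaddr() above is easier to check with concrete numbers. The sketch below models addresses as byte offsets from __kfence_pool and verifies the round trip; the page size and object count are assumptions of the example.

/* Model of the addr_to_metadata() / metadata_to_pageaddr() mapping (sketch). */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE       4096UL
#define NUM_OBJECTS     255L

/* Offset of object @index's data page from the start of the pool. */
static unsigned long index_to_pageoff(long index)
{
        return (unsigned long)(index + 1) * 2 * PAGE_SIZE;
}

/* Inverse mapping: pool offset -> object index, or -1 if out of range. */
static long pageoff_to_index(unsigned long off)
{
        long index = (long)(off / (PAGE_SIZE * 2)) - 1;

        return (index < 0 || index >= NUM_OBJECTS) ? -1 : index;
}

int main(void)
{
        /* Object 0 sits right after the two leading guard pages. */
        printf("object 0 at pool + 0x%lx\n", index_to_pageoff(0)); /* 0x2000 */

        /* The round trip holds for every byte of every data page. */
        for (long i = 0; i < NUM_OBJECTS; i++)
                for (unsigned long b = 0; b < PAGE_SIZE; b += 512)
                        assert(pageoff_to_index(index_to_pageoff(i) + b) == i);

        puts("round trip ok");
        return 0;
}
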
+ */ + if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr)) + return 0; + + return pageaddr; +} + +/* + * Update the object's metadata state, including updating the alloc/free stacks + * depending on the state transition. + */ +static noinline void metadata_update_state(struct kfence_metadata *meta, + enum kfence_object_state next) +{ + struct kfence_track *track = + next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track; + + lockdep_assert_held(&meta->lock); + + /* + * Skip over 1 (this) functions; noinline ensures we do not accidentally + * skip over the caller by never inlining. + */ + track->num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1); + track->pid = task_pid_nr(current); + + /* + * Pairs with READ_ONCE() in + * kfence_shutdown_cache(), + * kfence_handle_page_fault(). + */ + WRITE_ONCE(meta->state, next); +} + +/* Write canary byte to @addr. */ +static inline bool set_canary_byte(u8 *addr) +{ + *addr = KFENCE_CANARY_PATTERN(addr); + return true; +} + +/* Check canary byte at @addr. */ +static inline bool check_canary_byte(u8 *addr) +{ + if (likely(*addr == KFENCE_CANARY_PATTERN(addr))) + return true; + + atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]); + kfence_report_error((unsigned long)addr, addr_to_metadata((unsigned long)addr), + KFENCE_ERROR_CORRUPTION); + return false; +} + +/* __always_inline this to ensure we won't do an indirect call to fn. */ +static __always_inline void for_each_canary(const struct kfence_metadata *meta, bool (*fn)(u8 *)) +{ + const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE); + unsigned long addr; + + lockdep_assert_held(&meta->lock); + + /* + * We'll iterate over each canary byte per-side until fn() returns + * false. However, we'll still iterate over the canary bytes to the + * right of the object even if there was an error in the canary bytes to + * the left of the object. Specifically, if check_canary_byte() + * generates an error, showing both sides might give more clues as to + * what the error is about when displaying which bytes were corrupted. + */ + + /* Apply to left of object. */ + for (addr = pageaddr; addr < meta->addr; addr++) { + if (!fn((u8 *)addr)) + break; + } + + /* Apply to right of object. */ + for (addr = meta->addr + meta->size; addr < pageaddr + PAGE_SIZE; addr++) { + if (!fn((u8 *)addr)) + break; + } +} + +static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp) +{ + struct kfence_metadata *meta = NULL; + unsigned long flags; + struct page *page; + void *addr; + + /* Try to obtain a free object. */ + raw_spin_lock_irqsave(&kfence_freelist_lock, flags); + if (!list_empty(&kfence_freelist)) { + meta = list_entry(kfence_freelist.next, struct kfence_metadata, list); + list_del_init(&meta->list); + } + raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags); + if (!meta) + return NULL; + + if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) { + /* + * This is extremely unlikely -- we are reporting on a + * use-after-free, which locked meta->lock, and the reporting + * code via printk calls kmalloc() which ends up in + * kfence_alloc() and tries to grab the same object that we're + * reporting on. While it has never been observed, lockdep does + * report that there is a possibility of deadlock. Fix it by + * using trylock and bailing out gracefully. + */ + raw_spin_lock_irqsave(&kfence_freelist_lock, flags); + /* Put the object back on the freelist. 
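
The canary scheme used by set_canary_byte()/check_canary_byte() above can be modelled in isolation. The pattern varies with the low three address bits (see KFENCE_CANARY_PATTERN in mm/kfence/kfence.h later in this patch), so shifted or constant-filled overwrites of the redzone are usually detected, and the first corrupted byte shows how far an overflow reached. The buffer size and object placement below are invented for the illustration.

/* Stand-alone model of the KFENCE canary bytes (illustration only). */
#include <stdint.h>
#include <stdio.h>

#define CANARY_PATTERN(addr)    ((uint8_t)0xaa ^ (uint8_t)((uintptr_t)(addr) & 0x7))
#define TOY_PAGE                256     /* toy "page" for the example */

static uint8_t page[TOY_PAGE];

/* Fill everything outside [obj_start, obj_start + obj_size) with canaries. */
static void set_canaries(size_t obj_start, size_t obj_size)
{
        for (size_t i = 0; i < obj_start; i++)
                page[i] = CANARY_PATTERN(&page[i]);
        for (size_t i = obj_start + obj_size; i < TOY_PAGE; i++)
                page[i] = CANARY_PATTERN(&page[i]);
}

/* Return the index of the first corrupted canary byte, or -1 if all intact. */
static long check_canaries(size_t obj_start, size_t obj_size)
{
        for (size_t i = 0; i < obj_start; i++)
                if (page[i] != CANARY_PATTERN(&page[i]))
                        return (long)i;
        for (size_t i = obj_start + obj_size; i < TOY_PAGE; i++)
                if (page[i] != CANARY_PATTERN(&page[i]))
                        return (long)i;
        return -1;
}

int main(void)
{
        set_canaries(64, 100);                                  /* object at [64, 164) */
        printf("intact:    %ld\n", check_canaries(64, 100));    /* -1 */

        page[170] = 0;  /* out-of-bounds write 6 bytes past the object's end */
        printf("corrupted: %ld\n", check_canaries(64, 100));    /* 170 */
        return 0;
}
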
*/ + list_add_tail(&meta->list, &kfence_freelist); + raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags); + + return NULL; + } + + meta->addr = metadata_to_pageaddr(meta); + /* Unprotect if we're reusing this page. */ + if (meta->state == KFENCE_OBJECT_FREED) + kfence_unprotect(meta->addr); + + /* + * Note: for allocations made before RNG initialization, will always + * return zero. We still benefit from enabling KFENCE as early as + * possible, even when the RNG is not yet available, as this will allow + * KFENCE to detect bugs due to earlier allocations. The only downside + * is that the out-of-bounds accesses detected are deterministic for + * such allocations. + */ + if (prandom_u32_max(2)) { + /* Allocate on the "right" side, re-calculate address. */ + meta->addr += PAGE_SIZE - size; + meta->addr = ALIGN_DOWN(meta->addr, cache->align); + } + + addr = (void *)meta->addr; + + /* Update remaining metadata. */ + metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED); + /* Pairs with READ_ONCE() in kfence_shutdown_cache(). */ + WRITE_ONCE(meta->cache, cache); + meta->size = size; + for_each_canary(meta, set_canary_byte); + + /* Set required struct page fields. */ + page = virt_to_page(meta->addr); + page->slab_cache = cache; + + raw_spin_unlock_irqrestore(&meta->lock, flags); + + /* Memory initialization. */ + + /* + * We check slab_want_init_on_alloc() ourselves, rather than letting + * SL*B do the initialization, as otherwise we might overwrite KFENCE's + * redzone. + */ + if (unlikely(slab_want_init_on_alloc(gfp, cache))) + memzero_explicit(addr, size); + if (cache->ctor) + cache->ctor(addr); + + if (CONFIG_KFENCE_STRESS_TEST_FAULTS && !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS)) + kfence_protect(meta->addr); /* Random "faults" by protecting the object. */ + + atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]); + atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]); + + return addr; +} + +static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie) +{ + struct kcsan_scoped_access assert_page_exclusive; + unsigned long flags; + + raw_spin_lock_irqsave(&meta->lock, flags); + + if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) { + /* Invalid or double-free, bail out. */ + atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]); + kfence_report_error((unsigned long)addr, meta, KFENCE_ERROR_INVALID_FREE); + raw_spin_unlock_irqrestore(&meta->lock, flags); + return; + } + + /* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */ + kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE, + KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT, + &assert_page_exclusive); + + if (CONFIG_KFENCE_STRESS_TEST_FAULTS) + kfence_unprotect((unsigned long)addr); /* To check canary bytes. */ + + /* Restore page protection if there was an OOB access. */ + if (meta->unprotected_page) { + kfence_protect(meta->unprotected_page); + meta->unprotected_page = 0; + } + + /* Check canary bytes for memory corruption. */ + for_each_canary(meta, check_canary_byte); + + /* + * Clear memory if init-on-free is set. While we protect the page, the + * data is still there, and after a use-after-free is detected, we + * unprotect the page, so the data is still accessible. + */ + if (!zombie && unlikely(slab_want_init_on_free(meta->cache))) + memzero_explicit(addr, meta->size); + + /* Mark the object as freed. 
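
The effect of the left/right placement in kfence_guarded_alloc() above is easiest to see with numbers. In the sketch below (page offset, allocation size and cache alignment are all invented), a right-placed object ends at most cache->align - 1 bytes before the following guard page, so out-of-bounds accesses to the right fault almost immediately; a left-placed object catches accesses to the left the same way and relies on the canary bytes for the right.

/* Model of KFENCE's left/right object placement within its data page (sketch). */
#include <stdio.h>

#define PAGE_SIZE               4096UL
#define ALIGN_DOWN(x, a)        ((x) & ~((a) - 1))

int main(void)
{
        unsigned long page = 0x2000;    /* data page offset, as in the earlier sketch */
        unsigned long size = 272;       /* requested allocation size */
        unsigned long align = 64;       /* assumed cache->align (power of two) */

        /* Left placement: object starts at the page; the guard page is just before it. */
        unsigned long left = page;

        /* Right placement: push the object towards the following guard page. */
        unsigned long right = ALIGN_DOWN(page + PAGE_SIZE - size, align);

        printf("left : [%#lx, %#lx), %lu canary bytes to the right\n",
               left, left + size, page + PAGE_SIZE - (left + size));
        printf("right: [%#lx, %#lx), %lu canary bytes to the right\n",
               right, right + size, page + PAGE_SIZE - (right + size));
        return 0;
}
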
*/ + metadata_update_state(meta, KFENCE_OBJECT_FREED); + + raw_spin_unlock_irqrestore(&meta->lock, flags); + + /* Protect to detect use-after-frees. */ + kfence_protect((unsigned long)addr); + + kcsan_end_scoped_access(&assert_page_exclusive); + if (!zombie) { + /* Add it to the tail of the freelist for reuse. */ + raw_spin_lock_irqsave(&kfence_freelist_lock, flags); + KFENCE_WARN_ON(!list_empty(&meta->list)); + list_add_tail(&meta->list, &kfence_freelist); + raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags); + + atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]); + atomic_long_inc(&counters[KFENCE_COUNTER_FREES]); + } else { + /* See kfence_shutdown_cache(). */ + atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]); + } +} + +static void rcu_guarded_free(struct rcu_head *h) +{ + struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head); + + kfence_guarded_free((void *)meta->addr, meta, false); +} + +static bool __init kfence_init_pool(void) +{ + unsigned long addr = (unsigned long)__kfence_pool; + struct page *pages; + int i; + + if (!__kfence_pool) + return false; + + if (!arch_kfence_init_pool()) + goto err; + + pages = virt_to_page(addr); + + /* + * Set up object pages: they must have PG_slab set, to avoid freeing + * these as real pages. + * + * We also want to avoid inserting kfence_free() in the kfree() + * fast-path in SLUB, and therefore need to ensure kfree() correctly + * enters __slab_free() slow-path. + */ + for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) { + if (!i || (i % 2)) + continue; + + /* Verify we do not have a compound head page. */ + if (WARN_ON(compound_head(&pages[i]) != &pages[i])) + goto err; + + __SetPageSlab(&pages[i]); + } + + /* + * Protect the first 2 pages. The first page is mostly unnecessary, and + * merely serves as an extended guard page. However, adding one + * additional page in the beginning gives us an even number of pages, + * which simplifies the mapping of address to metadata index. + */ + for (i = 0; i < 2; i++) { + if (unlikely(!kfence_protect(addr))) + goto err; + + addr += PAGE_SIZE; + } + + for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) { + struct kfence_metadata *meta = &kfence_metadata[i]; + + /* Initialize metadata. */ + INIT_LIST_HEAD(&meta->list); + raw_spin_lock_init(&meta->lock); + meta->state = KFENCE_OBJECT_UNUSED; + meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */ + list_add_tail(&meta->list, &kfence_freelist); + + /* Protect the right redzone. */ + if (unlikely(!kfence_protect(addr + PAGE_SIZE))) + goto err; + + addr += 2 * PAGE_SIZE; + } + + return true; + +err: + /* + * Only release unprotected pages, and do not try to go back and change + * page attributes due to risk of failing to do so as well. If changing + * page attributes for some pages fails, it is very likely that it also + * fails for the first page, and therefore expect addr==__kfence_pool in + * most failure cases. 
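
The "protect to detect use-after-frees" step above is plain page protection, so the effect can be demonstrated from user space with mmap()/mprotect(). This is only an analogy (KFENCE flips the permissions through the arch hook kfence_protect_page()), but it shows why the very first touch of a freed KFENCE object traps instead of silently reading stale data:

/* User-space analogue of protecting a freed KFENCE page (illustration only). */
#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static void on_segv(int sig)
{
        static const char msg[] = "faulted on first access after protect\n";

        (void)sig;
        write(STDOUT_FILENO, msg, sizeof(msg) - 1);
        _exit(0);
}

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        char *obj = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (obj == MAP_FAILED)
                return 1;
        signal(SIGSEGV, on_segv);

        obj[0] = 'x';                           /* "allocated": accesses succeed */
        mprotect(obj, page_size, PROT_NONE);    /* "freed": page becomes inaccessible */
        obj[0] = 'y';                           /* "use-after-free": traps immediately */

        puts("not reached");
        return 0;
}
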
+ */ + memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool)); + __kfence_pool = NULL; + return false; +} + +/* === DebugFS Interface ==================================================== */ + +static int stats_show(struct seq_file *seq, void *v) +{ + int i; + + seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled)); + for (i = 0; i < KFENCE_COUNTER_COUNT; i++) + seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i])); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(stats); + +/* + * debugfs seq_file operations for /sys/kernel/debug/kfence/objects. + * start_object() and next_object() return the object index + 1, because NULL is used + * to stop iteration. + */ +static void *start_object(struct seq_file *seq, loff_t *pos) +{ + if (*pos < CONFIG_KFENCE_NUM_OBJECTS) + return (void *)((long)*pos + 1); + return NULL; +} + +static void stop_object(struct seq_file *seq, void *v) +{ +} + +static void *next_object(struct seq_file *seq, void *v, loff_t *pos) +{ + ++*pos; + if (*pos < CONFIG_KFENCE_NUM_OBJECTS) + return (void *)((long)*pos + 1); + return NULL; +} + +static int show_object(struct seq_file *seq, void *v) +{ + struct kfence_metadata *meta = &kfence_metadata[(long)v - 1]; + unsigned long flags; + + raw_spin_lock_irqsave(&meta->lock, flags); + kfence_print_object(seq, meta); + raw_spin_unlock_irqrestore(&meta->lock, flags); + seq_puts(seq, "---------------------------------\n"); + + return 0; +} + +static const struct seq_operations object_seqops = { + .start = start_object, + .next = next_object, + .stop = stop_object, + .show = show_object, +}; + +static int open_objects(struct inode *inode, struct file *file) +{ + return seq_open(file, &object_seqops); +} + +static const struct file_operations objects_fops = { + .open = open_objects, + .read = seq_read, + .llseek = seq_lseek, +}; + +static int __init kfence_debugfs_init(void) +{ + struct dentry *kfence_dir = debugfs_create_dir("kfence", NULL); + + debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops); + debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops); + return 0; +} + +late_initcall(kfence_debugfs_init); + +/* === Allocation Gate Timer ================================================ */ + +/* + * Set up delayed work, which will enable and disable the static key. We need to + * use a work queue (rather than a simple timer), since enabling and disabling a + * static key cannot be done from an interrupt. + * + * Note: Toggling a static branch currently causes IPIs, and here we'll end up + * with a total of 2 IPIs to all CPUs. If this ends up a problem in future (with + * more aggressive sampling intervals), we could get away with a variant that + * avoids IPIs, at the cost of not immediately capturing allocations if the + * instructions remain cached. + */ +static struct delayed_work kfence_timer; +static void toggle_allocation_gate(struct work_struct *work) +{ + if (!READ_ONCE(kfence_enabled)) + return; + + /* Enable static key, and await allocation to happen. */ + atomic_set(&kfence_allocation_gate, 0); +#ifdef CONFIG_KFENCE_STATIC_KEYS + static_branch_enable(&kfence_allocation_key); + /* + * Await an allocation. Timeout after 1 second, in case the kernel stops + * doing allocations, to avoid stalling this worker task for too long. 
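
Once a kernel is running with KFENCE enabled, the counters exported by stats_show() above land in /sys/kernel/debug/kfence/stats (assuming debugfs is mounted in the usual place and the caller is allowed to traverse it). A minimal reader, for illustration only:

/* Dump the KFENCE statistics file created above (needs CONFIG_KFENCE + debugfs). */
#include <stdio.h>

int main(void)
{
        char line[256];
        FILE *f = fopen("/sys/kernel/debug/kfence/stats", "r");

        if (!f) {
                perror("kfence stats");  /* KFENCE not built in, or debugfs not accessible */
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);     /* e.g. "enabled: 1", "total allocations: ..." */
        fclose(f);
        return 0;
}
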
+ */ + { + unsigned long end_wait = jiffies + HZ; + + do { + set_current_state(TASK_UNINTERRUPTIBLE); + if (atomic_read(&kfence_allocation_gate) != 0) + break; + schedule_timeout(1); + } while (time_before(jiffies, end_wait)); + __set_current_state(TASK_RUNNING); + } + /* Disable static key and reset timer. */ + static_branch_disable(&kfence_allocation_key); +#endif + schedule_delayed_work(&kfence_timer, msecs_to_jiffies(kfence_sample_interval)); +} +static DECLARE_DELAYED_WORK(kfence_timer, toggle_allocation_gate); + +/* === Public interface ===================================================== */ + +void __init kfence_alloc_pool(void) +{ + if (!kfence_sample_interval) + return; + + __kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE); + + if (!__kfence_pool) + pr_err("failed to allocate pool\n"); +} + +void __init kfence_init(void) +{ + /* Setting kfence_sample_interval to 0 on boot disables KFENCE. */ + if (!kfence_sample_interval) + return; + + if (!kfence_init_pool()) { + pr_err("%s failed\n", __func__); + return; + } + + WRITE_ONCE(kfence_enabled, true); + schedule_delayed_work(&kfence_timer, 0); + pr_info("initialized - using %lu bytes for %d objects", KFENCE_POOL_SIZE, + CONFIG_KFENCE_NUM_OBJECTS); + if (IS_ENABLED(CONFIG_DEBUG_KERNEL)) + pr_cont(" at 0x%px-0x%px\n", (void *)__kfence_pool, + (void *)(__kfence_pool + KFENCE_POOL_SIZE)); + else + pr_cont("\n"); +} + +void kfence_shutdown_cache(struct kmem_cache *s) +{ + unsigned long flags; + struct kfence_metadata *meta; + int i; + + for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) { + bool in_use; + + meta = &kfence_metadata[i]; + + /* + * If we observe some inconsistent cache and state pair where we + * should have returned false here, cache destruction is racing + * with either kmem_cache_alloc() or kmem_cache_free(). Taking + * the lock will not help, as different critical section + * serialization will have the same outcome. + */ + if (READ_ONCE(meta->cache) != s || + READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED) + continue; + + raw_spin_lock_irqsave(&meta->lock, flags); + in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED; + raw_spin_unlock_irqrestore(&meta->lock, flags); + + if (in_use) { + /* + * This cache still has allocations, and we should not + * release them back into the freelist so they can still + * safely be used and retain the kernel's default + * behaviour of keeping the allocations alive (leak the + * cache); however, they effectively become "zombie + * allocations" as the KFENCE objects are the only ones + * still in use and the owning cache is being destroyed. + * + * We mark them freed, so that any subsequent use shows + * more useful error messages that will include stack + * traces of the user of the object, the original + * allocation, and caller to shutdown_cache(). + */ + kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true); + } + } + + for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) { + meta = &kfence_metadata[i]; + + /* See above. 
*/ + if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED) + continue; + + raw_spin_lock_irqsave(&meta->lock, flags); + if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED) + meta->cache = NULL; + raw_spin_unlock_irqrestore(&meta->lock, flags); + } +} + +void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) +{ + /* + * allocation_gate only needs to become non-zero, so it doesn't make + * sense to continue writing to it and pay the associated contention + * cost, in case we have a large number of concurrent allocations. + */ + if (atomic_read(&kfence_allocation_gate) || atomic_inc_return(&kfence_allocation_gate) > 1) + return NULL; + + if (!READ_ONCE(kfence_enabled)) + return NULL; + + if (size > PAGE_SIZE) + return NULL; + + return kfence_guarded_alloc(s, size, flags); +} + +size_t kfence_ksize(const void *addr) +{ + const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr); + + /* + * Read locklessly -- if there is a race with __kfence_alloc(), this is + * either a use-after-free or invalid access. + */ + return meta ? meta->size : 0; +} + +void *kfence_object_start(const void *addr) +{ + const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr); + + /* + * Read locklessly -- if there is a race with __kfence_alloc(), this is + * either a use-after-free or invalid access. + */ + return meta ? (void *)meta->addr : NULL; +} + +void __kfence_free(void *addr) +{ + struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr); + + /* + * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing + * the object, as the object page may be recycled for other-typed + * objects once it has been freed. meta->cache may be NULL if the cache + * was destroyed. + */ + if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU))) + call_rcu(&meta->rcu_head, rcu_guarded_free); + else + kfence_guarded_free(addr, meta, false); +} + +bool kfence_handle_page_fault(unsigned long addr) +{ + const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE; + struct kfence_metadata *to_report = NULL; + enum kfence_error_type error_type; + unsigned long flags; + + if (!is_kfence_address((void *)addr)) + return false; + + if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */ + return kfence_unprotect(addr); /* ... unprotect and proceed. */ + + atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]); + + if (page_index % 2) { + /* This is a redzone, report a buffer overflow. */ + struct kfence_metadata *meta; + int distance = 0; + + meta = addr_to_metadata(addr - PAGE_SIZE); + if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) { + to_report = meta; + /* Data race ok; distance calculation approximate. */ + distance = addr - data_race(meta->addr + meta->size); + } + + meta = addr_to_metadata(addr + PAGE_SIZE); + if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) { + /* Data race ok; distance calculation approximate. */ + if (!to_report || distance > data_race(meta->addr) - addr) + to_report = meta; + } + + if (!to_report) + goto out; + + raw_spin_lock_irqsave(&to_report->lock, flags); + to_report->unprotected_page = addr; + error_type = KFENCE_ERROR_OOB; + + /* + * If the object was freed before we took the look we can still + * report this as an OOB -- the report will simply show the + * stacktrace of the free as well. 
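
The neighbour selection for a fault in a guard page (report the allocated object closest to the faulting address) can be modelled on its own. The addresses below are invented; the comparison mirrors the two addr_to_metadata() lookups and the distance check in kfence_handle_page_fault() above, and it assumes at least one neighbour is still allocated (otherwise the access is reported as invalid).

/* Model of picking the closest neighbour for a guard-page fault (sketch). */
#include <stdio.h>

struct obj {
        unsigned long addr;     /* start of the object */
        unsigned long size;
        int allocated;
};

/* Fault at @addr in the guard page between @left and @right; 0 = left, 1 = right. */
static int closest_neighbour(unsigned long addr, const struct obj *left,
                             const struct obj *right)
{
        unsigned long dl = addr - (left->addr + left->size);    /* bytes past left's end */
        unsigned long dr = right->addr - addr;                  /* bytes before right's start */

        if (left->allocated && (!right->allocated || dl <= dr))
                return 0;
        return 1;
}

int main(void)
{
        struct obj left  = { .addr = 0x2ec0, .size = 272, .allocated = 1 };
        struct obj right = { .addr = 0x4000, .size = 64,  .allocated = 1 };

        /* A fault 16 bytes past the end of the left object blames the left object. */
        printf("closest neighbour: %d\n", closest_neighbour(0x2fd0 + 16, &left, &right));
        return 0;
}
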
+ */ + } else { + to_report = addr_to_metadata(addr); + if (!to_report) + goto out; + + raw_spin_lock_irqsave(&to_report->lock, flags); + error_type = KFENCE_ERROR_UAF; + /* + * We may race with __kfence_alloc(), and it is possible that a + * freed object may be reallocated. We simply report this as a + * use-after-free, with the stack trace showing the place where + * the object was re-allocated. + */ + } + +out: + if (to_report) { + kfence_report_error(addr, to_report, error_type); + raw_spin_unlock_irqrestore(&to_report->lock, flags); + } else { + /* This may be a UAF or OOB access, but we can't be sure. */ + kfence_report_error(addr, NULL, KFENCE_ERROR_INVALID); + } + + return kfence_unprotect(addr); /* Unprotect and let access proceed. */ +} diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h new file mode 100644 index 000000000000..1014060f9707 --- /dev/null +++ b/mm/kfence/kfence.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Kernel Electric-Fence (KFENCE). For more info please see + * Documentation/dev-tools/kfence.rst. + * + * Copyright (C) 2020, Google LLC. + */ + +#ifndef MM_KFENCE_KFENCE_H +#define MM_KFENCE_KFENCE_H + +#include +#include +#include +#include + +#include "../slab.h" /* for struct kmem_cache */ + +/* For non-debug builds, avoid leaking kernel pointers into dmesg. */ +#ifdef CONFIG_DEBUG_KERNEL +#define PTR_FMT "%px" +#else +#define PTR_FMT "%p" +#endif + +/* + * Get the canary byte pattern for @addr. Use a pattern that varies based on the + * lower 3 bits of the address, to detect memory corruptions with higher + * probability, where similar constants are used. + */ +#define KFENCE_CANARY_PATTERN(addr) ((u8)0xaa ^ (u8)((unsigned long)(addr) & 0x7)) + +/* Maximum stack depth for reports. */ +#define KFENCE_STACK_DEPTH 64 + +/* KFENCE object states. */ +enum kfence_object_state { + KFENCE_OBJECT_UNUSED, /* Object is unused. */ + KFENCE_OBJECT_ALLOCATED, /* Object is currently allocated. */ + KFENCE_OBJECT_FREED, /* Object was allocated, and then freed. */ +}; + +/* Alloc/free tracking information. */ +struct kfence_track { + pid_t pid; + int num_stack_entries; + unsigned long stack_entries[KFENCE_STACK_DEPTH]; +}; + +/* KFENCE metadata per guarded allocation. */ +struct kfence_metadata { + struct list_head list; /* Freelist node; access under kfence_freelist_lock. */ + struct rcu_head rcu_head; /* For delayed freeing. */ + + /* + * Lock protecting below data; to ensure consistency of the below data, + * since the following may execute concurrently: __kfence_alloc(), + * __kfence_free(), kfence_handle_page_fault(). However, note that we + * cannot grab the same metadata off the freelist twice, and multiple + * __kfence_alloc() cannot run concurrently on the same metadata. + */ + raw_spinlock_t lock; + + /* The current state of the object; see above. */ + enum kfence_object_state state; + + /* + * Allocated object address; cannot be calculated from size, because of + * alignment requirements. + * + * Invariant: ALIGN_DOWN(addr, PAGE_SIZE) is constant. + */ + unsigned long addr; + + /* + * The size of the original allocation. + */ + size_t size; + + /* + * The kmem_cache cache of the last allocation; NULL if never allocated + * or the cache has already been destroyed. + */ + struct kmem_cache *cache; + + /* + * In case of an invalid access, the page that was unprotected; we + * optimistically only store one address. + */ + unsigned long unprotected_page; + + /* Allocation and free stack information. 
*/ + struct kfence_track alloc_track; + struct kfence_track free_track; +}; + +extern struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS]; + +/* KFENCE error types for report generation. */ +enum kfence_error_type { + KFENCE_ERROR_OOB, /* Detected a out-of-bounds access. */ + KFENCE_ERROR_UAF, /* Detected a use-after-free access. */ + KFENCE_ERROR_CORRUPTION, /* Detected a memory corruption on free. */ + KFENCE_ERROR_INVALID, /* Invalid access of unknown type. */ + KFENCE_ERROR_INVALID_FREE, /* Invalid free. */ +}; + +void kfence_report_error(unsigned long address, const struct kfence_metadata *meta, + enum kfence_error_type type); + +void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta); + +#endif /* MM_KFENCE_KFENCE_H */ diff --git a/mm/kfence/report.c b/mm/kfence/report.c new file mode 100644 index 000000000000..64f27c8d46a3 --- /dev/null +++ b/mm/kfence/report.c @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * KFENCE reporting. + * + * Copyright (C) 2020, Google LLC. + */ + +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include "kfence.h" + +/* Helper function to either print to a seq_file or to console. */ +__printf(2, 3) +static void seq_con_printf(struct seq_file *seq, const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + if (seq) + seq_vprintf(seq, fmt, args); + else + vprintk(fmt, args); + va_end(args); +} + +/* + * Get the number of stack entries to skip to get out of MM internals. @type is + * optional, and if set to NULL, assumes an allocation or free stack. + */ +static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries, + const enum kfence_error_type *type) +{ + char buf[64]; + int skipnr, fallback = 0; + bool is_access_fault = false; + + if (type) { + /* Depending on error type, find different stack entries. */ + switch (*type) { + case KFENCE_ERROR_UAF: + case KFENCE_ERROR_OOB: + case KFENCE_ERROR_INVALID: + is_access_fault = true; + break; + case KFENCE_ERROR_CORRUPTION: + case KFENCE_ERROR_INVALID_FREE: + break; + } + } + + for (skipnr = 0; skipnr < num_entries; skipnr++) { + int len = scnprintf(buf, sizeof(buf), "%ps", (void *)stack_entries[skipnr]); + + if (is_access_fault) { + if (!strncmp(buf, KFENCE_SKIP_ARCH_FAULT_HANDLER, len)) + goto found; + } else { + if (str_has_prefix(buf, "kfence_") || str_has_prefix(buf, "__kfence_") || + !strncmp(buf, "__slab_free", len)) { + /* + * In case of tail calls from any of the below + * to any of the above. + */ + fallback = skipnr + 1; + } + + /* Also the *_bulk() variants by only checking prefixes. */ + if (str_has_prefix(buf, "kfree") || + str_has_prefix(buf, "kmem_cache_free") || + str_has_prefix(buf, "__kmalloc") || + str_has_prefix(buf, "kmem_cache_alloc")) + goto found; + } + } + if (fallback < num_entries) + return fallback; +found: + skipnr++; + return skipnr < num_entries ? skipnr : 0; +} + +static void kfence_print_stack(struct seq_file *seq, const struct kfence_metadata *meta, + bool show_alloc) +{ + const struct kfence_track *track = show_alloc ? &meta->alloc_track : &meta->free_track; + + if (track->num_stack_entries) { + /* Skip allocation/free internals stack. */ + int i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL); + + /* stack_trace_seq_print() does not exist; open code our own. */ + for (; i < track->num_stack_entries; i++) + seq_con_printf(seq, " %pS\n", (void *)track->stack_entries[i]); + } else { + seq_con_printf(seq, " no %s stack\n", show_alloc ? 
"allocation" : "deallocation"); + } +} + +void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta) +{ + const int size = abs(meta->size); + const unsigned long start = meta->addr; + const struct kmem_cache *const cache = meta->cache; + + lockdep_assert_held(&meta->lock); + + if (meta->state == KFENCE_OBJECT_UNUSED) { + seq_con_printf(seq, "kfence-#%zd unused\n", meta - kfence_metadata); + return; + } + + seq_con_printf(seq, + "kfence-#%zd [0x" PTR_FMT "-0x" PTR_FMT + ", size=%d, cache=%s] allocated by task %d:\n", + meta - kfence_metadata, (void *)start, (void *)(start + size - 1), size, + (cache && cache->name) ? cache->name : "", meta->alloc_track.pid); + kfence_print_stack(seq, meta, true); + + if (meta->state == KFENCE_OBJECT_FREED) { + seq_con_printf(seq, "\nfreed by task %d:\n", meta->free_track.pid); + kfence_print_stack(seq, meta, false); + } +} + +/* + * Show bytes at @addr that are different from the expected canary values, up to + * @max_bytes. + */ +static void print_diff_canary(unsigned long address, size_t bytes_to_show, + const struct kfence_metadata *meta) +{ + const unsigned long show_until_addr = address + bytes_to_show; + const u8 *cur, *end; + + /* Do not show contents of object nor read into following guard page. */ + end = (const u8 *)(address < meta->addr ? min(show_until_addr, meta->addr) + : min(show_until_addr, PAGE_ALIGN(address))); + + pr_cont("["); + for (cur = (const u8 *)address; cur < end; cur++) { + if (*cur == KFENCE_CANARY_PATTERN(cur)) + pr_cont(" ."); + else if (IS_ENABLED(CONFIG_DEBUG_KERNEL)) + pr_cont(" 0x%02x", *cur); + else /* Do not leak kernel memory in non-debug builds. */ + pr_cont(" !"); + } + pr_cont(" ]"); +} + +void kfence_report_error(unsigned long address, const struct kfence_metadata *meta, + enum kfence_error_type type) +{ + unsigned long stack_entries[KFENCE_STACK_DEPTH] = { 0 }; + int num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 1); + int skipnr = get_stack_skipnr(stack_entries, num_stack_entries, &type); + const ptrdiff_t object_index = meta ? meta - kfence_metadata : -1; + + /* Require non-NULL meta, except if KFENCE_ERROR_INVALID. */ + if (WARN_ON(type != KFENCE_ERROR_INVALID && !meta)) + return; + + if (meta) + lockdep_assert_held(&meta->lock); + /* + * Because we may generate reports in printk-unfriendly parts of the + * kernel, such as scheduler code, the use of printk() could deadlock. + * Until such time that all printing code here is safe in all parts of + * the kernel, accept the risk, and just get our message out (given the + * system might already behave unpredictably due to the memory error). + * As such, also disable lockdep to hide warnings, and avoid disabling + * lockdep for the rest of the kernel. + */ + lockdep_off(); + + pr_err("==================================================================\n"); + /* Print report header. */ + switch (type) { + case KFENCE_ERROR_OOB: { + const bool left_of_object = address < meta->addr; + + pr_err("BUG: KFENCE: out-of-bounds in %pS\n\n", (void *)stack_entries[skipnr]); + pr_err("Out-of-bounds access at 0x" PTR_FMT " (%luB %s of kfence-#%zd):\n", + (void *)address, + left_of_object ? meta->addr - address : address - meta->addr, + left_of_object ? 
"left" : "right", object_index); + break; + } + case KFENCE_ERROR_UAF: + pr_err("BUG: KFENCE: use-after-free in %pS\n\n", (void *)stack_entries[skipnr]); + pr_err("Use-after-free access at 0x" PTR_FMT " (in kfence-#%zd):\n", + (void *)address, object_index); + break; + case KFENCE_ERROR_CORRUPTION: + pr_err("BUG: KFENCE: memory corruption in %pS\n\n", (void *)stack_entries[skipnr]); + pr_err("Corrupted memory at 0x" PTR_FMT " ", (void *)address); + print_diff_canary(address, 16, meta); + pr_cont(" (in kfence-#%zd):\n", object_index); + break; + case KFENCE_ERROR_INVALID: + pr_err("BUG: KFENCE: invalid access in %pS\n\n", (void *)stack_entries[skipnr]); + pr_err("Invalid access at 0x" PTR_FMT ":\n", (void *)address); + break; + case KFENCE_ERROR_INVALID_FREE: + pr_err("BUG: KFENCE: invalid free in %pS\n\n", (void *)stack_entries[skipnr]); + pr_err("Invalid free of 0x" PTR_FMT " (in kfence-#%zd):\n", (void *)address, + object_index); + break; + } + + /* Print stack trace and object info. */ + stack_trace_print(stack_entries + skipnr, num_stack_entries - skipnr, 0); + + if (meta) { + pr_err("\n"); + kfence_print_object(NULL, meta); + } + + /* Print report footer. */ + pr_err("\n"); + dump_stack_print_info(KERN_ERR); + pr_err("==================================================================\n"); + + lockdep_on(); + + if (panic_on_warn) + panic("panic_on_warn set ...\n"); + + /* We encountered a memory unsafety error, taint the kernel! */ + add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK); +} -- cgit v1.2.3-59-g8ed1b From d438fabce7860df3cb9337776be6f90b59ced8ed Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 25 Feb 2021 17:19:08 -0800 Subject: kfence: use pt_regs to generate stack trace on faults Instead of removing the fault handling portion of the stack trace based on the fault handler's name, just use struct pt_regs directly. Change kfence_handle_page_fault() to take a struct pt_regs, and plumb it through to kfence_report_error() for out-of-bounds, use-after-free, or invalid access errors, where pt_regs is used to generate the stack trace. If the kernel is a DEBUG_KERNEL, also show registers for more information. 
Link: https://lkml.kernel.org/r/20201105092133.2075331-1-elver@google.com Signed-off-by: Marco Elver Suggested-by: Mark Rutland Acked-by: Mark Rutland Cc: Alexander Potapenko Cc: Dmitry Vyukov Cc: Jann Horn Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/include/asm/kfence.h | 2 -- arch/arm64/mm/fault.c | 2 +- arch/x86/include/asm/kfence.h | 6 ---- arch/x86/mm/fault.c | 2 +- include/linux/kfence.h | 5 ++-- mm/kfence/core.c | 10 +++---- mm/kfence/kfence.h | 4 +-- mm/kfence/report.c | 63 +++++++++++++++++++++++------------------ 8 files changed, 48 insertions(+), 46 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h index 42a06f83850a..d061176d57ea 100644 --- a/arch/arm64/include/asm/kfence.h +++ b/arch/arm64/include/asm/kfence.h @@ -10,8 +10,6 @@ #include -#define KFENCE_SKIP_ARCH_FAULT_HANDLER "el1_sync" - static inline bool arch_kfence_init_pool(void) { return true; } static inline bool kfence_protect_page(unsigned long addr, bool protect) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 42515900ab2e..56d9423ca59c 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -390,7 +390,7 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr, } else if (addr < PAGE_SIZE) { msg = "NULL pointer dereference"; } else { - if (kfence_handle_page_fault(addr)) + if (kfence_handle_page_fault(addr, regs)) return; msg = "paging request"; diff --git a/arch/x86/include/asm/kfence.h b/arch/x86/include/asm/kfence.h index a0659dbd93ea..97bbb4a9083a 100644 --- a/arch/x86/include/asm/kfence.h +++ b/arch/x86/include/asm/kfence.h @@ -16,12 +16,6 @@ #include #include -/* - * The page fault handler entry function, up to which the stack trace is - * truncated in reports. - */ -#define KFENCE_SKIP_ARCH_FAULT_HANDLER "asm_exc_page_fault" - /* Force 4K pages for __kfence_pool. */ static inline bool arch_kfence_init_pool(void) { diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 99fe6d3e690d..38868b4ce8b0 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -682,7 +682,7 @@ page_fault_oops(struct pt_regs *regs, unsigned long error_code, efi_crash_gracefully_on_page_fault(address); /* Only not-present faults should be handled by KFENCE. */ - if (!(error_code & X86_PF_PROT) && kfence_handle_page_fault(address)) + if (!(error_code & X86_PF_PROT) && kfence_handle_page_fault(address, regs)) return; oops: diff --git a/include/linux/kfence.h b/include/linux/kfence.h index 81f3911cb298..5a56bcf5606c 100644 --- a/include/linux/kfence.h +++ b/include/linux/kfence.h @@ -186,6 +186,7 @@ static __always_inline __must_check bool kfence_free(void *addr) /** * kfence_handle_page_fault() - perform page fault handling for KFENCE pages * @addr: faulting address + * @regs: current struct pt_regs (can be NULL, but shows full stack trace) * * Return: * * false - address outside KFENCE pool, @@ -196,7 +197,7 @@ static __always_inline __must_check bool kfence_free(void *addr) * cases KFENCE prints an error message and marks the offending page as * present, so that the kernel can proceed. 
*/ -bool __must_check kfence_handle_page_fault(unsigned long addr); +bool __must_check kfence_handle_page_fault(unsigned long addr, struct pt_regs *regs); #else /* CONFIG_KFENCE */ @@ -209,7 +210,7 @@ static inline size_t kfence_ksize(const void *addr) { return 0; } static inline void *kfence_object_start(const void *addr) { return NULL; } static inline void __kfence_free(void *addr) { } static inline bool __must_check kfence_free(void *addr) { return false; } -static inline bool __must_check kfence_handle_page_fault(unsigned long addr) { return false; } +static inline bool __must_check kfence_handle_page_fault(unsigned long addr, struct pt_regs *regs) { return false; } #endif diff --git a/mm/kfence/core.c b/mm/kfence/core.c index d6a32c13336b..61c76670a7a9 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -216,7 +216,7 @@ static inline bool check_canary_byte(u8 *addr) return true; atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]); - kfence_report_error((unsigned long)addr, addr_to_metadata((unsigned long)addr), + kfence_report_error((unsigned long)addr, NULL, addr_to_metadata((unsigned long)addr), KFENCE_ERROR_CORRUPTION); return false; } @@ -351,7 +351,7 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) { /* Invalid or double-free, bail out. */ atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]); - kfence_report_error((unsigned long)addr, meta, KFENCE_ERROR_INVALID_FREE); + kfence_report_error((unsigned long)addr, NULL, meta, KFENCE_ERROR_INVALID_FREE); raw_spin_unlock_irqrestore(&meta->lock, flags); return; } @@ -766,7 +766,7 @@ void __kfence_free(void *addr) kfence_guarded_free(addr, meta, false); } -bool kfence_handle_page_fault(unsigned long addr) +bool kfence_handle_page_fault(unsigned long addr, struct pt_regs *regs) { const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE; struct kfence_metadata *to_report = NULL; @@ -829,11 +829,11 @@ bool kfence_handle_page_fault(unsigned long addr) out: if (to_report) { - kfence_report_error(addr, to_report, error_type); + kfence_report_error(addr, regs, to_report, error_type); raw_spin_unlock_irqrestore(&to_report->lock, flags); } else { /* This may be a UAF or OOB access, but we can't be sure. */ - kfence_report_error(addr, NULL, KFENCE_ERROR_INVALID); + kfence_report_error(addr, regs, NULL, KFENCE_ERROR_INVALID); } return kfence_unprotect(addr); /* Unprotect and let access proceed. */ diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h index 1014060f9707..0d83e628a97d 100644 --- a/mm/kfence/kfence.h +++ b/mm/kfence/kfence.h @@ -105,8 +105,8 @@ enum kfence_error_type { KFENCE_ERROR_INVALID_FREE, /* Invalid free. */ }; -void kfence_report_error(unsigned long address, const struct kfence_metadata *meta, - enum kfence_error_type type); +void kfence_report_error(unsigned long address, struct pt_regs *regs, + const struct kfence_metadata *meta, enum kfence_error_type type); void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta); diff --git a/mm/kfence/report.c b/mm/kfence/report.c index 64f27c8d46a3..4dbfa9a382e4 100644 --- a/mm/kfence/report.c +++ b/mm/kfence/report.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -41,7 +42,6 @@ static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries { char buf[64]; int skipnr, fallback = 0; - bool is_access_fault = false; if (type) { /* Depending on error type, find different stack entries. 
*/ @@ -49,8 +49,12 @@ static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries case KFENCE_ERROR_UAF: case KFENCE_ERROR_OOB: case KFENCE_ERROR_INVALID: - is_access_fault = true; - break; + /* + * kfence_handle_page_fault() may be called with pt_regs + * set to NULL; in that case we'll simply show the full + * stack trace. + */ + return 0; case KFENCE_ERROR_CORRUPTION: case KFENCE_ERROR_INVALID_FREE: break; @@ -60,26 +64,21 @@ static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries for (skipnr = 0; skipnr < num_entries; skipnr++) { int len = scnprintf(buf, sizeof(buf), "%ps", (void *)stack_entries[skipnr]); - if (is_access_fault) { - if (!strncmp(buf, KFENCE_SKIP_ARCH_FAULT_HANDLER, len)) - goto found; - } else { - if (str_has_prefix(buf, "kfence_") || str_has_prefix(buf, "__kfence_") || - !strncmp(buf, "__slab_free", len)) { - /* - * In case of tail calls from any of the below - * to any of the above. - */ - fallback = skipnr + 1; - } - - /* Also the *_bulk() variants by only checking prefixes. */ - if (str_has_prefix(buf, "kfree") || - str_has_prefix(buf, "kmem_cache_free") || - str_has_prefix(buf, "__kmalloc") || - str_has_prefix(buf, "kmem_cache_alloc")) - goto found; + if (str_has_prefix(buf, "kfence_") || str_has_prefix(buf, "__kfence_") || + !strncmp(buf, "__slab_free", len)) { + /* + * In case of tail calls from any of the below + * to any of the above. + */ + fallback = skipnr + 1; } + + /* Also the *_bulk() variants by only checking prefixes. */ + if (str_has_prefix(buf, "kfree") || + str_has_prefix(buf, "kmem_cache_free") || + str_has_prefix(buf, "__kmalloc") || + str_has_prefix(buf, "kmem_cache_alloc")) + goto found; } if (fallback < num_entries) return fallback; @@ -157,13 +156,20 @@ static void print_diff_canary(unsigned long address, size_t bytes_to_show, pr_cont(" ]"); } -void kfence_report_error(unsigned long address, const struct kfence_metadata *meta, - enum kfence_error_type type) +void kfence_report_error(unsigned long address, struct pt_regs *regs, + const struct kfence_metadata *meta, enum kfence_error_type type) { unsigned long stack_entries[KFENCE_STACK_DEPTH] = { 0 }; - int num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 1); - int skipnr = get_stack_skipnr(stack_entries, num_stack_entries, &type); const ptrdiff_t object_index = meta ? meta - kfence_metadata : -1; + int num_stack_entries; + int skipnr = 0; + + if (regs) { + num_stack_entries = stack_trace_save_regs(regs, stack_entries, KFENCE_STACK_DEPTH, 0); + } else { + num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 1); + skipnr = get_stack_skipnr(stack_entries, num_stack_entries, &type); + } /* Require non-NULL meta, except if KFENCE_ERROR_INVALID. */ if (WARN_ON(type != KFENCE_ERROR_INVALID && !meta)) @@ -227,7 +233,10 @@ void kfence_report_error(unsigned long address, const struct kfence_metadata *me /* Print report footer. */ pr_err("\n"); - dump_stack_print_info(KERN_ERR); + if (IS_ENABLED(CONFIG_DEBUG_KERNEL) && regs) + show_regs(regs); + else + dump_stack_print_info(KERN_ERR); pr_err("==================================================================\n"); lockdep_on(); -- cgit v1.2.3-59-g8ed1b From d3fb45f370d927224af35d22d34ea465884afec8 Mon Sep 17 00:00:00 2001 From: Alexander Potapenko Date: Thu, 25 Feb 2021 17:19:11 -0800 Subject: mm, kfence: insert KFENCE hooks for SLAB Inserts KFENCE hooks into the SLAB allocator. 
To pass the originally requested size to KFENCE, add an argument 'orig_size' to slab_alloc*(). The additional argument is required to preserve the requested original size for kmalloc() allocations, which uses size classes (e.g. an allocation of 272 bytes will return an object of size 512). Therefore, kmem_cache::size does not represent the kmalloc-caller's requested size, and we must introduce the argument 'orig_size' to propagate the originally requested size to KFENCE. Without the originally requested size, we would not be able to detect out-of-bounds accesses for objects placed at the end of a KFENCE object page if that object is not equal to the kmalloc-size class it was bucketed into. When KFENCE is disabled, there is no additional overhead, since slab_alloc*() functions are __always_inline. Link: https://lkml.kernel.org/r/20201103175841.3495947-5-elver@google.com Signed-off-by: Marco Elver Signed-off-by: Alexander Potapenko Reviewed-by: Dmitry Vyukov Co-developed-by: Marco Elver Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Cc: Andrey Konovalov Cc: Andrey Ryabinin Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Catalin Marinas Cc: Dave Hansen Cc: Eric Dumazet Cc: Greg Kroah-Hartman Cc: Hillf Danton Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Jann Horn Cc: Joern Engel Cc: Jonathan Corbet Cc: Kees Cook Cc: Mark Rutland Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: SeongJae Park Cc: Thomas Gleixner Cc: Vlastimil Babka Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/slab_def.h | 3 +++ mm/kfence/core.c | 2 ++ mm/slab.c | 38 +++++++++++++++++++++++++++++--------- mm/slab_common.c | 5 ++++- 4 files changed, 38 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 9eb430c163c2..3aa5e1e73ab6 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -2,6 +2,7 @@ #ifndef _LINUX_SLAB_DEF_H #define _LINUX_SLAB_DEF_H +#include #include /* @@ -114,6 +115,8 @@ static inline unsigned int obj_to_index(const struct kmem_cache *cache, static inline int objs_per_slab_page(const struct kmem_cache *cache, const struct page *page) { + if (is_kfence_address(page_address(page))) + return 1; return cache->num; } diff --git a/mm/kfence/core.c b/mm/kfence/core.c index 61c76670a7a9..05c18aa11851 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -317,6 +317,8 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g /* Set required struct page fields. 
*/ page = virt_to_page(meta->addr); page->slab_cache = cache; + if (IS_ENABLED(CONFIG_SLAB)) + page->s_mem = addr; raw_spin_unlock_irqrestore(&meta->lock, flags); diff --git a/mm/slab.c b/mm/slab.c index 35c68d99d460..51fd424e0d6d 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -100,6 +100,7 @@ #include #include #include +#include #include #include #include @@ -3208,7 +3209,7 @@ must_grow: } static __always_inline void * -slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, +slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_size, unsigned long caller) { unsigned long save_flags; @@ -3221,6 +3222,10 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, if (unlikely(!cachep)) return NULL; + ptr = kfence_alloc(cachep, orig_size, flags); + if (unlikely(ptr)) + goto out_hooks; + cache_alloc_debugcheck_before(cachep, flags); local_irq_save(save_flags); @@ -3253,6 +3258,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr) memset(ptr, 0, cachep->object_size); +out_hooks: slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr); return ptr; } @@ -3290,7 +3296,7 @@ __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags) #endif /* CONFIG_NUMA */ static __always_inline void * -slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller) +slab_alloc(struct kmem_cache *cachep, gfp_t flags, size_t orig_size, unsigned long caller) { unsigned long save_flags; void *objp; @@ -3301,6 +3307,10 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller) if (unlikely(!cachep)) return NULL; + objp = kfence_alloc(cachep, orig_size, flags); + if (unlikely(objp)) + goto out; + cache_alloc_debugcheck_before(cachep, flags); local_irq_save(save_flags); objp = __do_cache_alloc(cachep, flags); @@ -3311,6 +3321,7 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller) if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp) memset(objp, 0, cachep->object_size); +out: slab_post_alloc_hook(cachep, objcg, flags, 1, &objp); return objp; } @@ -3416,6 +3427,12 @@ free_done: static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp, unsigned long caller) { + if (is_kfence_address(objp)) { + kmemleak_free_recursive(objp, cachep->flags); + __kfence_free(objp); + return; + } + if (unlikely(slab_want_init_on_free(cachep))) memset(objp, 0, cachep->object_size); @@ -3482,7 +3499,7 @@ void ___cache_free(struct kmem_cache *cachep, void *objp, */ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) { - void *ret = slab_alloc(cachep, flags, _RET_IP_); + void *ret = slab_alloc(cachep, flags, cachep->object_size, _RET_IP_); trace_kmem_cache_alloc(_RET_IP_, ret, cachep->object_size, cachep->size, flags); @@ -3515,7 +3532,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, local_irq_disable(); for (i = 0; i < size; i++) { - void *objp = __do_cache_alloc(s, flags); + void *objp = kfence_alloc(s, s->object_size, flags) ?: __do_cache_alloc(s, flags); if (unlikely(!objp)) goto error; @@ -3548,7 +3565,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size) { void *ret; - ret = slab_alloc(cachep, flags, _RET_IP_); + ret = slab_alloc(cachep, flags, size, _RET_IP_); ret = kasan_kmalloc(cachep, ret, size, flags); trace_kmalloc(_RET_IP_, ret, @@ -3574,7 +3591,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace); */ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) { - 
void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); + void *ret = slab_alloc_node(cachep, flags, nodeid, cachep->object_size, _RET_IP_); trace_kmem_cache_alloc_node(_RET_IP_, ret, cachep->object_size, cachep->size, @@ -3592,7 +3609,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep, { void *ret; - ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); + ret = slab_alloc_node(cachep, flags, nodeid, size, _RET_IP_); ret = kasan_kmalloc(cachep, ret, size, flags); trace_kmalloc_node(_RET_IP_, ret, @@ -3673,7 +3690,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, cachep = kmalloc_slab(size, flags); if (unlikely(ZERO_OR_NULL_PTR(cachep))) return cachep; - ret = slab_alloc(cachep, flags, caller); + ret = slab_alloc(cachep, flags, size, caller); ret = kasan_kmalloc(cachep, ret, size, flags); trace_kmalloc(caller, ret, @@ -4172,7 +4189,10 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page, BUG_ON(objnr >= cachep->num); /* Find offset within object. */ - offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep); + if (is_kfence_address(ptr)) + offset = ptr - kfence_object_start(ptr); + else + offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep); /* Allow address range falling entirely within usercopy region. */ if (offset >= cachep->useroffset && diff --git a/mm/slab_common.c b/mm/slab_common.c index 7c8298c17145..284954ef1da5 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -430,6 +431,7 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work) rcu_barrier(); list_for_each_entry_safe(s, s2, &to_destroy, list) { + kfence_shutdown_cache(s); #ifdef SLAB_SUPPORTS_SYSFS sysfs_slab_release(s); #else @@ -455,6 +457,7 @@ static int shutdown_cache(struct kmem_cache *s) list_add_tail(&s->list, &slab_caches_to_rcu_destroy); schedule_work(&slab_caches_to_rcu_destroy_work); } else { + kfence_shutdown_cache(s); #ifdef SLAB_SUPPORTS_SYSFS sysfs_slab_unlink(s); sysfs_slab_release(s); @@ -1235,7 +1238,7 @@ size_t ksize(const void *objp) if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp)) return 0; - size = __ksize(objp); + size = kfence_ksize(objp) ?: __ksize(objp); /* * We assume that ksize callers could use whole allocated area, * so we need to unpoison this area. -- cgit v1.2.3-59-g8ed1b From b89fb5ef0ce611b5db8eb9d3a5a7fcaab2cbe9e4 Mon Sep 17 00:00:00 2001 From: Alexander Potapenko Date: Thu, 25 Feb 2021 17:19:16 -0800 Subject: mm, kfence: insert KFENCE hooks for SLUB Inserts KFENCE hooks into the SLUB allocator. To pass the originally requested size to KFENCE, add an argument 'orig_size' to slab_alloc*(). The additional argument is required to preserve the requested original size for kmalloc() allocations, which uses size classes (e.g. an allocation of 272 bytes will return an object of size 512). Therefore, kmem_cache::size does not represent the kmalloc-caller's requested size, and we must introduce the argument 'orig_size' to propagate the originally requested size to KFENCE. Without the originally requested size, we would not be able to detect out-of-bounds accesses for objects placed at the end of a KFENCE object page if that object is not equal to the kmalloc-size class it was bucketed into. When KFENCE is disabled, there is no additional overhead, since slab_alloc*() functions are __always_inline. 
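
To make the 272-byte example above concrete: with a right-placed KFENCE object, placing by the requested size leaves at most an alignment-sized gap of canary bytes before the guard page, whereas placing by the 512-byte kmalloc size class would leave 240 out-of-bounds byte offsets (272..511) that never reach the guard page at all. A stand-alone sketch of the placement arithmetic (4 KiB pages and 64-byte alignment are assumptions of the example):

/* Why kfence_alloc() needs the originally requested size (sketch, invented numbers). */
#include <stdio.h>

#define PAGE_SIZE               4096UL
#define ALIGN_DOWN(x, a)        ((x) & ~((a) - 1))

/* Right-placed object start for a given size, as in kfence_guarded_alloc(). */
static unsigned long right_placed(unsigned long page, unsigned long size,
                                  unsigned long align)
{
        return ALIGN_DOWN(page + PAGE_SIZE - size, align);
}

int main(void)
{
        unsigned long page = 0x2000, align = 64;
        unsigned long requested = 272;          /* what the caller asked kmalloc() for */
        unsigned long size_class = 512;         /* the kmalloc bucket it falls into */

        printf("orig_size=272: first faulting offset %lu\n",
               page + PAGE_SIZE - right_placed(page, requested, align));
        /* 320: only 48 canary bytes separate the object's end from the guard page */

        printf("size=512:      first faulting offset %lu\n",
               page + PAGE_SIZE - right_placed(page, size_class, align));
        /* 512: offsets 272..511 stay inside the data page and never fault */
        return 0;
}
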
Link: https://lkml.kernel.org/r/20201103175841.3495947-6-elver@google.com Signed-off-by: Marco Elver Signed-off-by: Alexander Potapenko Reviewed-by: Dmitry Vyukov Reviewed-by: Jann Horn Co-developed-by: Marco Elver Cc: Andrey Konovalov Cc: Andrey Ryabinin Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Catalin Marinas Cc: Christopher Lameter Cc: Dave Hansen Cc: David Rientjes Cc: Eric Dumazet Cc: Greg Kroah-Hartman Cc: Hillf Danton Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Joern Engel Cc: Jonathan Corbet Cc: Joonsoo Kim Cc: Kees Cook Cc: Mark Rutland Cc: Paul E. McKenney Cc: Pekka Enberg Cc: Peter Zijlstra Cc: SeongJae Park Cc: Thomas Gleixner Cc: Vlastimil Babka Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/slub_def.h | 3 +++ mm/kfence/core.c | 2 ++ mm/slub.c | 60 +++++++++++++++++++++++++++++++++++++----------- 3 files changed, 51 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 1be0ed5befa1..dcde82a4434c 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -7,6 +7,7 @@ * * (C) 2007 SGI, Christoph Lameter */ +#include #include #include @@ -185,6 +186,8 @@ static inline unsigned int __obj_to_index(const struct kmem_cache *cache, static inline unsigned int obj_to_index(const struct kmem_cache *cache, const struct page *page, void *obj) { + if (is_kfence_address(obj)) + return 0; return __obj_to_index(cache, page_address(page), obj); } diff --git a/mm/kfence/core.c b/mm/kfence/core.c index 05c18aa11851..7692af715fdb 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -317,6 +317,8 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g /* Set required struct page fields. */ page = virt_to_page(meta->addr); page->slab_cache = cache; + if (IS_ENABLED(CONFIG_SLUB)) + page->objects = 1; if (IS_ENABLED(CONFIG_SLAB)) page->s_mem = addr; diff --git a/mm/slub.c b/mm/slub.c index b2833ce85c92..383616af28c4 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -1570,6 +1571,11 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s, void *old_tail = *tail ? *tail : *head; int rsize; + if (is_kfence_address(next)) { + slab_free_hook(s, next); + return true; + } + /* Head and tail of the reconstructed freelist */ *head = NULL; *tail = NULL; @@ -2809,7 +2815,7 @@ static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s, * Otherwise we can simply pick the next object from the lockless free list. */ static __always_inline void *slab_alloc_node(struct kmem_cache *s, - gfp_t gfpflags, int node, unsigned long addr) + gfp_t gfpflags, int node, unsigned long addr, size_t orig_size) { void *object; struct kmem_cache_cpu *c; @@ -2820,6 +2826,11 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s, s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags); if (!s) return NULL; + + object = kfence_alloc(s, orig_size, gfpflags); + if (unlikely(object)) + goto out; + redo: /* * Must read kmem_cache cpu data via this cpu ptr. 
Preemption is @@ -2892,20 +2903,21 @@ redo: if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object) memset(kasan_reset_tag(object), 0, s->object_size); +out: slab_post_alloc_hook(s, objcg, gfpflags, 1, &object); return object; } static __always_inline void *slab_alloc(struct kmem_cache *s, - gfp_t gfpflags, unsigned long addr) + gfp_t gfpflags, unsigned long addr, size_t orig_size) { - return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr); + return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr, orig_size); } void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) { - void *ret = slab_alloc(s, gfpflags, _RET_IP_); + void *ret = slab_alloc(s, gfpflags, _RET_IP_, s->object_size); trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags); @@ -2917,7 +2929,7 @@ EXPORT_SYMBOL(kmem_cache_alloc); #ifdef CONFIG_TRACING void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) { - void *ret = slab_alloc(s, gfpflags, _RET_IP_); + void *ret = slab_alloc(s, gfpflags, _RET_IP_, size); trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags); ret = kasan_kmalloc(s, ret, size, gfpflags); return ret; @@ -2928,7 +2940,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace); #ifdef CONFIG_NUMA void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) { - void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_); + void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, s->object_size); trace_kmem_cache_alloc_node(_RET_IP_, ret, s->object_size, s->size, gfpflags, node); @@ -2942,7 +2954,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags, int node, size_t size) { - void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_); + void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, size); trace_kmalloc_node(_RET_IP_, ret, size, s->size, gfpflags, node); @@ -2976,6 +2988,9 @@ static void __slab_free(struct kmem_cache *s, struct page *page, stat(s, FREE_SLOWPATH); + if (kfence_free(head)) + return; + if (kmem_cache_debug(s) && !free_debug_processing(s, page, head, tail, cnt, addr)) return; @@ -3220,6 +3235,13 @@ int build_detached_freelist(struct kmem_cache *s, size_t size, df->s = cache_from_obj(s, object); /* Support for memcg */ } + if (is_kfence_address(object)) { + slab_free_hook(df->s, object); + __kfence_free(object); + p[size] = NULL; /* mark object processed */ + return size; + } + /* Start new detached freelist */ df->page = page; set_freepointer(df->s, object, NULL); @@ -3295,8 +3317,14 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, c = this_cpu_ptr(s->cpu_slab); for (i = 0; i < size; i++) { - void *object = c->freelist; + void *object = kfence_alloc(s, s->object_size, flags); + if (unlikely(object)) { + p[i] = object; + continue; + } + + object = c->freelist; if (unlikely(!object)) { /* * We may have removed an object from c->freelist using @@ -4021,7 +4049,7 @@ void *__kmalloc(size_t size, gfp_t flags) if (unlikely(ZERO_OR_NULL_PTR(s))) return s; - ret = slab_alloc(s, flags, _RET_IP_); + ret = slab_alloc(s, flags, _RET_IP_, size); trace_kmalloc(_RET_IP_, ret, size, s->size, flags); @@ -4069,7 +4097,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) if (unlikely(ZERO_OR_NULL_PTR(s))) return s; - ret = slab_alloc_node(s, flags, node, _RET_IP_); + ret = slab_alloc_node(s, flags, node, _RET_IP_, size); trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node); @@ -4095,6 +4123,7 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page, struct kmem_cache *s; 
unsigned int offset; size_t object_size; + bool is_kfence = is_kfence_address(ptr); ptr = kasan_reset_tag(ptr); @@ -4107,10 +4136,13 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page, to_user, 0, n); /* Find offset within object. */ - offset = (ptr - page_address(page)) % s->size; + if (is_kfence) + offset = ptr - kfence_object_start(ptr); + else + offset = (ptr - page_address(page)) % s->size; /* Adjust for redzone and reject if within the redzone. */ - if (kmem_cache_debug_flags(s, SLAB_RED_ZONE)) { + if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) { if (offset < s->red_left_pad) usercopy_abort("SLUB object in left red zone", s->name, to_user, offset, n); @@ -4527,7 +4559,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) if (unlikely(ZERO_OR_NULL_PTR(s))) return s; - ret = slab_alloc(s, gfpflags, caller); + ret = slab_alloc(s, gfpflags, caller, size); /* Honor the call site pointer we received. */ trace_kmalloc(caller, ret, size, s->size, gfpflags); @@ -4558,7 +4590,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, if (unlikely(ZERO_OR_NULL_PTR(s))) return s; - ret = slab_alloc_node(s, gfpflags, node, caller); + ret = slab_alloc_node(s, gfpflags, node, caller, size); /* Honor the call site pointer we received. */ trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); -- cgit v1.2.3-59-g8ed1b From bc8fbc5f305aecf63423da91e5faf4c0ce40bf38 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 25 Feb 2021 17:19:31 -0800 Subject: kfence: add test suite Add KFENCE test suite, testing various error detection scenarios. Makes use of KUnit for test organization. Since KFENCE's interface to obtain error reports is via the console, the test verifies that KFENCE outputs expected reports to the console. [elver@google.com: fix typo in test] Link: https://lkml.kernel.org/r/X9lHQExmHGvETxY4@elver.google.com [elver@google.com: show access type in report] Link: https://lkml.kernel.org/r/20210111091544.3287013-2-elver@google.com Link: https://lkml.kernel.org/r/20201103175841.3495947-9-elver@google.com Signed-off-by: Alexander Potapenko Signed-off-by: Marco Elver Reviewed-by: Dmitry Vyukov Co-developed-by: Alexander Potapenko Reviewed-by: Jann Horn Cc: Andrey Konovalov Cc: Andrey Ryabinin Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Catalin Marinas Cc: Christopher Lameter Cc: Dave Hansen Cc: David Rientjes Cc: Eric Dumazet Cc: Greg Kroah-Hartman Cc: Hillf Danton Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Joern Engel Cc: Jonathan Corbet Cc: Joonsoo Kim Cc: Kees Cook Cc: Mark Rutland Cc: Paul E. 
McKenney Cc: Pekka Enberg Cc: Peter Zijlstra Cc: SeongJae Park Cc: Thomas Gleixner Cc: Vlastimil Babka Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/dev-tools/kfence.rst | 12 +- arch/arm64/mm/fault.c | 2 +- arch/x86/mm/fault.c | 3 +- include/linux/kfence.h | 9 +- lib/Kconfig.kfence | 13 + mm/kfence/Makefile | 3 + mm/kfence/core.c | 11 +- mm/kfence/kfence.h | 2 +- mm/kfence/kfence_test.c | 858 +++++++++++++++++++++++++++++++++++++ mm/kfence/report.c | 27 +- 10 files changed, 915 insertions(+), 25 deletions(-) create mode 100644 mm/kfence/kfence_test.c (limited to 'include/linux') diff --git a/Documentation/dev-tools/kfence.rst b/Documentation/dev-tools/kfence.rst index 0e2fb6ef3016..58a0a5fa1ddc 100644 --- a/Documentation/dev-tools/kfence.rst +++ b/Documentation/dev-tools/kfence.rst @@ -65,9 +65,9 @@ Error reports A typical out-of-bounds access looks like this:: ================================================================== - BUG: KFENCE: out-of-bounds in test_out_of_bounds_read+0xa3/0x22b + BUG: KFENCE: out-of-bounds read in test_out_of_bounds_read+0xa3/0x22b - Out-of-bounds access at 0xffffffffb672efff (1B left of kfence-#17): + Out-of-bounds read at 0xffffffffb672efff (1B left of kfence-#17): test_out_of_bounds_read+0xa3/0x22b kunit_try_run_case+0x51/0x85 kunit_generic_run_threadfn_adapter+0x16/0x30 @@ -94,9 +94,9 @@ its origin. Note that, real kernel addresses are only shown for Use-after-free accesses are reported as:: ================================================================== - BUG: KFENCE: use-after-free in test_use_after_free_read+0xb3/0x143 + BUG: KFENCE: use-after-free read in test_use_after_free_read+0xb3/0x143 - Use-after-free access at 0xffffffffb673dfe0 (in kfence-#24): + Use-after-free read at 0xffffffffb673dfe0 (in kfence-#24): test_use_after_free_read+0xb3/0x143 kunit_try_run_case+0x51/0x85 kunit_generic_run_threadfn_adapter+0x16/0x30 @@ -193,9 +193,9 @@ where it was not possible to determine an associated object, e.g. if adjacent object pages had not yet been allocated:: ================================================================== - BUG: KFENCE: invalid access in test_invalid_access+0x26/0xe0 + BUG: KFENCE: invalid read in test_invalid_access+0x26/0xe0 - Invalid access at 0xffffffffb670b00a: + Invalid read at 0xffffffffb670b00a: test_invalid_access+0x26/0xe0 kunit_try_run_case+0x51/0x85 kunit_generic_run_threadfn_adapter+0x16/0x30 diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 56d9423ca59c..f37d4e3830b7 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -390,7 +390,7 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr, } else if (addr < PAGE_SIZE) { msg = "NULL pointer dereference"; } else { - if (kfence_handle_page_fault(addr, regs)) + if (kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs)) return; msg = "paging request"; diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 38868b4ce8b0..a73347e2cdfc 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -682,7 +682,8 @@ page_fault_oops(struct pt_regs *regs, unsigned long error_code, efi_crash_gracefully_on_page_fault(address); /* Only not-present faults should be handled by KFENCE. 
*/ - if (!(error_code & X86_PF_PROT) && kfence_handle_page_fault(address, regs)) + if (!(error_code & X86_PF_PROT) && + kfence_handle_page_fault(address, error_code & X86_PF_WRITE, regs)) return; oops: diff --git a/include/linux/kfence.h b/include/linux/kfence.h index 5a56bcf5606c..a70d1ea03532 100644 --- a/include/linux/kfence.h +++ b/include/linux/kfence.h @@ -186,6 +186,7 @@ static __always_inline __must_check bool kfence_free(void *addr) /** * kfence_handle_page_fault() - perform page fault handling for KFENCE pages * @addr: faulting address + * @is_write: is access a write * @regs: current struct pt_regs (can be NULL, but shows full stack trace) * * Return: @@ -197,7 +198,7 @@ static __always_inline __must_check bool kfence_free(void *addr) * cases KFENCE prints an error message and marks the offending page as * present, so that the kernel can proceed. */ -bool __must_check kfence_handle_page_fault(unsigned long addr, struct pt_regs *regs); +bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs); #else /* CONFIG_KFENCE */ @@ -210,7 +211,11 @@ static inline size_t kfence_ksize(const void *addr) { return 0; } static inline void *kfence_object_start(const void *addr) { return NULL; } static inline void __kfence_free(void *addr) { } static inline bool __must_check kfence_free(void *addr) { return false; } -static inline bool __must_check kfence_handle_page_fault(unsigned long addr, struct pt_regs *regs) { return false; } +static inline bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, + struct pt_regs *regs) +{ + return false; +} #endif diff --git a/lib/Kconfig.kfence b/lib/Kconfig.kfence index 605125ac2ae0..78f50ccb3b45 100644 --- a/lib/Kconfig.kfence +++ b/lib/Kconfig.kfence @@ -66,4 +66,17 @@ config KFENCE_STRESS_TEST_FAULTS Only for KFENCE testing; set to 0 if you are not a KFENCE developer. +config KFENCE_KUNIT_TEST + tristate "KFENCE integration test suite" if !KUNIT_ALL_TESTS + default KUNIT_ALL_TESTS + depends on TRACEPOINTS && KUNIT + help + Test suite for KFENCE, testing various error detection scenarios with + various allocation types, and checking that reports are correctly + output to console. + + Say Y here if you want the test to be built into the kernel and run + during boot; say M if you want the test to build as a module; say N + if you are unsure. + endif # KFENCE diff --git a/mm/kfence/Makefile b/mm/kfence/Makefile index d991e9a349f0..6872cd5e5390 100644 --- a/mm/kfence/Makefile +++ b/mm/kfence/Makefile @@ -1,3 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_KFENCE) := core.o report.o + +CFLAGS_kfence_test.o := -g -fno-omit-frame-pointer -fno-optimize-sibling-calls +obj-$(CONFIG_KFENCE_KUNIT_TEST) += kfence_test.o diff --git a/mm/kfence/core.c b/mm/kfence/core.c index 7692af715fdb..cfe3d32ac5b7 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -216,7 +216,7 @@ static inline bool check_canary_byte(u8 *addr) return true; atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]); - kfence_report_error((unsigned long)addr, NULL, addr_to_metadata((unsigned long)addr), + kfence_report_error((unsigned long)addr, false, NULL, addr_to_metadata((unsigned long)addr), KFENCE_ERROR_CORRUPTION); return false; } @@ -355,7 +355,8 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) { /* Invalid or double-free, bail out. 
*/ atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]); - kfence_report_error((unsigned long)addr, NULL, meta, KFENCE_ERROR_INVALID_FREE); + kfence_report_error((unsigned long)addr, false, NULL, meta, + KFENCE_ERROR_INVALID_FREE); raw_spin_unlock_irqrestore(&meta->lock, flags); return; } @@ -770,7 +771,7 @@ void __kfence_free(void *addr) kfence_guarded_free(addr, meta, false); } -bool kfence_handle_page_fault(unsigned long addr, struct pt_regs *regs) +bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs) { const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE; struct kfence_metadata *to_report = NULL; @@ -833,11 +834,11 @@ bool kfence_handle_page_fault(unsigned long addr, struct pt_regs *regs) out: if (to_report) { - kfence_report_error(addr, regs, to_report, error_type); + kfence_report_error(addr, is_write, regs, to_report, error_type); raw_spin_unlock_irqrestore(&to_report->lock, flags); } else { /* This may be a UAF or OOB access, but we can't be sure. */ - kfence_report_error(addr, regs, NULL, KFENCE_ERROR_INVALID); + kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID); } return kfence_unprotect(addr); /* Unprotect and let access proceed. */ diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h index 0d83e628a97d..1accc840dbbe 100644 --- a/mm/kfence/kfence.h +++ b/mm/kfence/kfence.h @@ -105,7 +105,7 @@ enum kfence_error_type { KFENCE_ERROR_INVALID_FREE, /* Invalid free. */ }; -void kfence_report_error(unsigned long address, struct pt_regs *regs, +void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs, const struct kfence_metadata *meta, enum kfence_error_type type); void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta); diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c new file mode 100644 index 000000000000..db1bb596acaf --- /dev/null +++ b/mm/kfence/kfence_test.c @@ -0,0 +1,858 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Test cases for KFENCE memory safety error detector. Since the interface with + * which KFENCE's reports are obtained is via the console, this is the output we + * should verify. For each test case checks the presence (or absence) of + * generated reports. Relies on 'console' tracepoint to capture reports as they + * appear in the kernel log. + * + * Copyright (C) 2020, Google LLC. + * Author: Alexander Potapenko + * Marco Elver + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "kfence.h" + +/* Report as observed from console. */ +static struct { + spinlock_t lock; + int nlines; + char lines[2][256]; +} observed = { + .lock = __SPIN_LOCK_UNLOCKED(observed.lock), +}; + +/* Probe for console output: obtains observed lines of interest. */ +static void probe_console(void *ignore, const char *buf, size_t len) +{ + unsigned long flags; + int nlines; + + spin_lock_irqsave(&observed.lock, flags); + nlines = observed.nlines; + + if (strnstr(buf, "BUG: KFENCE: ", len) && strnstr(buf, "test_", len)) { + /* + * KFENCE report and related to the test. + * + * The provided @buf is not NUL-terminated; copy no more than + * @len bytes and let strscpy() add the missing NUL-terminator. 
+ */ + strscpy(observed.lines[0], buf, min(len + 1, sizeof(observed.lines[0]))); + nlines = 1; + } else if (nlines == 1 && (strnstr(buf, "at 0x", len) || strnstr(buf, "of 0x", len))) { + strscpy(observed.lines[nlines++], buf, min(len + 1, sizeof(observed.lines[0]))); + } + + WRITE_ONCE(observed.nlines, nlines); /* Publish new nlines. */ + spin_unlock_irqrestore(&observed.lock, flags); +} + +/* Check if a report related to the test exists. */ +static bool report_available(void) +{ + return READ_ONCE(observed.nlines) == ARRAY_SIZE(observed.lines); +} + +/* Information we expect in a report. */ +struct expect_report { + enum kfence_error_type type; /* The type or error. */ + void *fn; /* Function pointer to expected function where access occurred. */ + char *addr; /* Address at which the bad access occurred. */ + bool is_write; /* Is access a write. */ +}; + +static const char *get_access_type(const struct expect_report *r) +{ + return r->is_write ? "write" : "read"; +} + +/* Check observed report matches information in @r. */ +static bool report_matches(const struct expect_report *r) +{ + bool ret = false; + unsigned long flags; + typeof(observed.lines) expect; + const char *end; + char *cur; + + /* Doubled-checked locking. */ + if (!report_available()) + return false; + + /* Generate expected report contents. */ + + /* Title */ + cur = expect[0]; + end = &expect[0][sizeof(expect[0]) - 1]; + switch (r->type) { + case KFENCE_ERROR_OOB: + cur += scnprintf(cur, end - cur, "BUG: KFENCE: out-of-bounds %s", + get_access_type(r)); + break; + case KFENCE_ERROR_UAF: + cur += scnprintf(cur, end - cur, "BUG: KFENCE: use-after-free %s", + get_access_type(r)); + break; + case KFENCE_ERROR_CORRUPTION: + cur += scnprintf(cur, end - cur, "BUG: KFENCE: memory corruption"); + break; + case KFENCE_ERROR_INVALID: + cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid %s", + get_access_type(r)); + break; + case KFENCE_ERROR_INVALID_FREE: + cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid free"); + break; + } + + scnprintf(cur, end - cur, " in %pS", r->fn); + /* The exact offset won't match, remove it; also strip module name. */ + cur = strchr(expect[0], '+'); + if (cur) + *cur = '\0'; + + /* Access information */ + cur = expect[1]; + end = &expect[1][sizeof(expect[1]) - 1]; + + switch (r->type) { + case KFENCE_ERROR_OOB: + cur += scnprintf(cur, end - cur, "Out-of-bounds %s at", get_access_type(r)); + break; + case KFENCE_ERROR_UAF: + cur += scnprintf(cur, end - cur, "Use-after-free %s at", get_access_type(r)); + break; + case KFENCE_ERROR_CORRUPTION: + cur += scnprintf(cur, end - cur, "Corrupted memory at"); + break; + case KFENCE_ERROR_INVALID: + cur += scnprintf(cur, end - cur, "Invalid %s at", get_access_type(r)); + break; + case KFENCE_ERROR_INVALID_FREE: + cur += scnprintf(cur, end - cur, "Invalid free of"); + break; + } + + cur += scnprintf(cur, end - cur, " 0x" PTR_FMT, (void *)r->addr); + + spin_lock_irqsave(&observed.lock, flags); + if (!report_available()) + goto out; /* A new report is being captured. */ + + /* Finally match expected output to what we actually observed. */ + ret = strstr(observed.lines[0], expect[0]) && strstr(observed.lines[1], expect[1]); +out: + spin_unlock_irqrestore(&observed.lock, flags); + return ret; +} + +/* ===== Test cases ===== */ + +#define TEST_PRIV_WANT_MEMCACHE ((void *)1) + +/* Cache used by tests; if NULL, allocate from kmalloc instead. 
*/ +static struct kmem_cache *test_cache; + +static size_t setup_test_cache(struct kunit *test, size_t size, slab_flags_t flags, + void (*ctor)(void *)) +{ + if (test->priv != TEST_PRIV_WANT_MEMCACHE) + return size; + + kunit_info(test, "%s: size=%zu, ctor=%ps\n", __func__, size, ctor); + + /* + * Use SLAB_NOLEAKTRACE to prevent merging with existing caches. Any + * other flag in SLAB_NEVER_MERGE also works. Use SLAB_ACCOUNT to + * allocate via memcg, if enabled. + */ + flags |= SLAB_NOLEAKTRACE | SLAB_ACCOUNT; + test_cache = kmem_cache_create("test", size, 1, flags, ctor); + KUNIT_ASSERT_TRUE_MSG(test, test_cache, "could not create cache"); + + return size; +} + +static void test_cache_destroy(void) +{ + if (!test_cache) + return; + + kmem_cache_destroy(test_cache); + test_cache = NULL; +} + +static inline size_t kmalloc_cache_alignment(size_t size) +{ + return kmalloc_caches[kmalloc_type(GFP_KERNEL)][kmalloc_index(size)]->align; +} + +/* Must always inline to match stack trace against caller. */ +static __always_inline void test_free(void *ptr) +{ + if (test_cache) + kmem_cache_free(test_cache, ptr); + else + kfree(ptr); +} + +/* + * If this should be a KFENCE allocation, and on which side the allocation and + * the closest guard page should be. + */ +enum allocation_policy { + ALLOCATE_ANY, /* KFENCE, any side. */ + ALLOCATE_LEFT, /* KFENCE, left side of page. */ + ALLOCATE_RIGHT, /* KFENCE, right side of page. */ + ALLOCATE_NONE, /* No KFENCE allocation. */ +}; + +/* + * Try to get a guarded allocation from KFENCE. Uses either kmalloc() or the + * current test_cache if set up. + */ +static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocation_policy policy) +{ + void *alloc; + unsigned long timeout, resched_after; + const char *policy_name; + + switch (policy) { + case ALLOCATE_ANY: + policy_name = "any"; + break; + case ALLOCATE_LEFT: + policy_name = "left"; + break; + case ALLOCATE_RIGHT: + policy_name = "right"; + break; + case ALLOCATE_NONE: + policy_name = "none"; + break; + } + + kunit_info(test, "%s: size=%zu, gfp=%x, policy=%s, cache=%i\n", __func__, size, gfp, + policy_name, !!test_cache); + + /* + * 100x the sample interval should be more than enough to ensure we get + * a KFENCE allocation eventually. + */ + timeout = jiffies + msecs_to_jiffies(100 * CONFIG_KFENCE_SAMPLE_INTERVAL); + /* + * Especially for non-preemption kernels, ensure the allocation-gate + * timer can catch up: after @resched_after, every failed allocation + * attempt yields, to ensure the allocation-gate timer is scheduled. + */ + resched_after = jiffies + msecs_to_jiffies(CONFIG_KFENCE_SAMPLE_INTERVAL); + do { + if (test_cache) + alloc = kmem_cache_alloc(test_cache, gfp); + else + alloc = kmalloc(size, gfp); + + if (is_kfence_address(alloc)) { + struct page *page = virt_to_head_page(alloc); + struct kmem_cache *s = test_cache ?: kmalloc_caches[kmalloc_type(GFP_KERNEL)][kmalloc_index(size)]; + + /* + * Verify that various helpers return the right values + * even for KFENCE objects; these are required so that + * memcg accounting works correctly. 
+ */ + KUNIT_EXPECT_EQ(test, obj_to_index(s, page, alloc), 0U); + KUNIT_EXPECT_EQ(test, objs_per_slab_page(s, page), 1); + + if (policy == ALLOCATE_ANY) + return alloc; + if (policy == ALLOCATE_LEFT && IS_ALIGNED((unsigned long)alloc, PAGE_SIZE)) + return alloc; + if (policy == ALLOCATE_RIGHT && + !IS_ALIGNED((unsigned long)alloc, PAGE_SIZE)) + return alloc; + } else if (policy == ALLOCATE_NONE) + return alloc; + + test_free(alloc); + + if (time_after(jiffies, resched_after)) + cond_resched(); + } while (time_before(jiffies, timeout)); + + KUNIT_ASSERT_TRUE_MSG(test, false, "failed to allocate from KFENCE"); + return NULL; /* Unreachable. */ +} + +static void test_out_of_bounds_read(struct kunit *test) +{ + size_t size = 32; + struct expect_report expect = { + .type = KFENCE_ERROR_OOB, + .fn = test_out_of_bounds_read, + .is_write = false, + }; + char *buf; + + setup_test_cache(test, size, 0, NULL); + + /* + * If we don't have our own cache, adjust based on alignment, so that we + * actually access guard pages on either side. + */ + if (!test_cache) + size = kmalloc_cache_alignment(size); + + /* Test both sides. */ + + buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT); + expect.addr = buf - 1; + READ_ONCE(*expect.addr); + KUNIT_EXPECT_TRUE(test, report_matches(&expect)); + test_free(buf); + + buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT); + expect.addr = buf + size; + READ_ONCE(*expect.addr); + KUNIT_EXPECT_TRUE(test, report_matches(&expect)); + test_free(buf); +} + +static void test_out_of_bounds_write(struct kunit *test) +{ + size_t size = 32; + struct expect_report expect = { + .type = KFENCE_ERROR_OOB, + .fn = test_out_of_bounds_write, + .is_write = true, + }; + char *buf; + + setup_test_cache(test, size, 0, NULL); + buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT); + expect.addr = buf - 1; + WRITE_ONCE(*expect.addr, 42); + KUNIT_EXPECT_TRUE(test, report_matches(&expect)); + test_free(buf); +} + +static void test_use_after_free_read(struct kunit *test) +{ + const size_t size = 32; + struct expect_report expect = { + .type = KFENCE_ERROR_UAF, + .fn = test_use_after_free_read, + .is_write = false, + }; + + setup_test_cache(test, size, 0, NULL); + expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY); + test_free(expect.addr); + READ_ONCE(*expect.addr); + KUNIT_EXPECT_TRUE(test, report_matches(&expect)); +} + +static void test_double_free(struct kunit *test) +{ + const size_t size = 32; + struct expect_report expect = { + .type = KFENCE_ERROR_INVALID_FREE, + .fn = test_double_free, + }; + + setup_test_cache(test, size, 0, NULL); + expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY); + test_free(expect.addr); + test_free(expect.addr); /* Double-free. */ + KUNIT_EXPECT_TRUE(test, report_matches(&expect)); +} + +static void test_invalid_addr_free(struct kunit *test) +{ + const size_t size = 32; + struct expect_report expect = { + .type = KFENCE_ERROR_INVALID_FREE, + .fn = test_invalid_addr_free, + }; + char *buf; + + setup_test_cache(test, size, 0, NULL); + buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY); + expect.addr = buf + 1; /* Free on invalid address. */ + test_free(expect.addr); /* Invalid address free. */ + test_free(buf); /* No error. 
*/ + KUNIT_EXPECT_TRUE(test, report_matches(&expect)); +} + +static void test_corruption(struct kunit *test) +{ + size_t size = 32; + struct expect_report expect = { + .type = KFENCE_ERROR_CORRUPTION, + .fn = test_corruption, + }; + char *buf; + + setup_test_cache(test, size, 0, NULL); + + /* Test both sides. */ + + buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT); + expect.addr = buf + size; + WRITE_ONCE(*expect.addr, 42); + test_free(buf); + KUNIT_EXPECT_TRUE(test, report_matches(&expect)); + + buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT); + expect.addr = buf - 1; + WRITE_ONCE(*expect.addr, 42); + test_free(buf); + KUNIT_EXPECT_TRUE(test, report_matches(&expect)); +} + +/* + * KFENCE is unable to detect an OOB if the allocation's alignment requirements + * leave a gap between the object and the guard page. Specifically, an + * allocation of e.g. 73 bytes is aligned on 8 and 128 bytes for SLUB or SLAB + * respectively. Therefore it is impossible for the allocated object to + * contiguously line up with the right guard page. + * + * However, we test that an access to memory beyond the gap results in KFENCE + * detecting an OOB access. + */ +static void test_kmalloc_aligned_oob_read(struct kunit *test) +{ + const size_t size = 73; + const size_t align = kmalloc_cache_alignment(size); + struct expect_report expect = { + .type = KFENCE_ERROR_OOB, + .fn = test_kmalloc_aligned_oob_read, + .is_write = false, + }; + char *buf; + + buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT); + + /* + * The object is offset to the right, so there won't be an OOB to the + * left of it. + */ + READ_ONCE(*(buf - 1)); + KUNIT_EXPECT_FALSE(test, report_available()); + + /* + * @buf must be aligned on @align, therefore buf + size belongs to the + * same page -> no OOB. + */ + READ_ONCE(*(buf + size)); + KUNIT_EXPECT_FALSE(test, report_available()); + + /* Overflowing by @align bytes will result in an OOB. */ + expect.addr = buf + size + align; + READ_ONCE(*expect.addr); + KUNIT_EXPECT_TRUE(test, report_matches(&expect)); + + test_free(buf); +} + +static void test_kmalloc_aligned_oob_write(struct kunit *test) +{ + const size_t size = 73; + struct expect_report expect = { + .type = KFENCE_ERROR_CORRUPTION, + .fn = test_kmalloc_aligned_oob_write, + }; + char *buf; + + buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT); + /* + * The object is offset to the right, so we won't get a page + * fault immediately after it. + */ + expect.addr = buf + size; + WRITE_ONCE(*expect.addr, READ_ONCE(*expect.addr) + 1); + KUNIT_EXPECT_FALSE(test, report_available()); + test_free(buf); + KUNIT_EXPECT_TRUE(test, report_matches(&expect)); +} + +/* Test cache shrinking and destroying with KFENCE. */ +static void test_shrink_memcache(struct kunit *test) +{ + const size_t size = 32; + void *buf; + + setup_test_cache(test, size, 0, NULL); + KUNIT_EXPECT_TRUE(test, test_cache); + buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY); + kmem_cache_shrink(test_cache); + test_free(buf); + + KUNIT_EXPECT_FALSE(test, report_available()); +} + +static void ctor_set_x(void *obj) +{ + /* Every object has at least 8 bytes. */ + memset(obj, 'x', 8); +} + +/* Ensure that SL*B does not modify KFENCE objects on bulk free. */ +static void test_free_bulk(struct kunit *test) +{ + int iter; + + for (iter = 0; iter < 5; iter++) { + const size_t size = setup_test_cache(test, 8 + prandom_u32_max(300), 0, + (iter & 1) ? 
ctor_set_x : NULL); + void *objects[] = { + test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT), + test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE), + test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT), + test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE), + test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE), + }; + + kmem_cache_free_bulk(test_cache, ARRAY_SIZE(objects), objects); + KUNIT_ASSERT_FALSE(test, report_available()); + test_cache_destroy(); + } +} + +/* Test init-on-free works. */ +static void test_init_on_free(struct kunit *test) +{ + const size_t size = 32; + struct expect_report expect = { + .type = KFENCE_ERROR_UAF, + .fn = test_init_on_free, + .is_write = false, + }; + int i; + + if (!IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON)) + return; + /* Assume it hasn't been disabled on command line. */ + + setup_test_cache(test, size, 0, NULL); + expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY); + for (i = 0; i < size; i++) + expect.addr[i] = i + 1; + test_free(expect.addr); + + for (i = 0; i < size; i++) { + /* + * This may fail if the page was recycled by KFENCE and then + * written to again -- this however, is near impossible with a + * default config. + */ + KUNIT_EXPECT_EQ(test, expect.addr[i], (char)0); + + if (!i) /* Only check first access to not fail test if page is ever re-protected. */ + KUNIT_EXPECT_TRUE(test, report_matches(&expect)); + } +} + +/* Ensure that constructors work properly. */ +static void test_memcache_ctor(struct kunit *test) +{ + const size_t size = 32; + char *buf; + int i; + + setup_test_cache(test, size, 0, ctor_set_x); + buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY); + + for (i = 0; i < 8; i++) + KUNIT_EXPECT_EQ(test, buf[i], (char)'x'); + + test_free(buf); + + KUNIT_EXPECT_FALSE(test, report_available()); +} + +/* Test that memory is zeroed if requested. */ +static void test_gfpzero(struct kunit *test) +{ + const size_t size = PAGE_SIZE; /* PAGE_SIZE so we can use ALLOCATE_ANY. */ + char *buf1, *buf2; + int i; + + if (CONFIG_KFENCE_SAMPLE_INTERVAL > 100) { + kunit_warn(test, "skipping ... would take too long\n"); + return; + } + + setup_test_cache(test, size, 0, NULL); + buf1 = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY); + for (i = 0; i < size; i++) + buf1[i] = i + 1; + test_free(buf1); + + /* Try to get same address again -- this can take a while. */ + for (i = 0;; i++) { + buf2 = test_alloc(test, size, GFP_KERNEL | __GFP_ZERO, ALLOCATE_ANY); + if (buf1 == buf2) + break; + test_free(buf2); + + if (i == CONFIG_KFENCE_NUM_OBJECTS) { + kunit_warn(test, "giving up ... cannot get same object back\n"); + return; + } + } + + for (i = 0; i < size; i++) + KUNIT_EXPECT_EQ(test, buf2[i], (char)0); + + test_free(buf2); + + KUNIT_EXPECT_FALSE(test, report_available()); +} + +static void test_invalid_access(struct kunit *test) +{ + const struct expect_report expect = { + .type = KFENCE_ERROR_INVALID, + .fn = test_invalid_access, + .addr = &__kfence_pool[10], + .is_write = false, + }; + + READ_ONCE(__kfence_pool[10]); + KUNIT_EXPECT_TRUE(test, report_matches(&expect)); +} + +/* Test SLAB_TYPESAFE_BY_RCU works. */ +static void test_memcache_typesafe_by_rcu(struct kunit *test) +{ + const size_t size = 32; + struct expect_report expect = { + .type = KFENCE_ERROR_UAF, + .fn = test_memcache_typesafe_by_rcu, + .is_write = false, + }; + + setup_test_cache(test, size, SLAB_TYPESAFE_BY_RCU, NULL); + KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. 
*/ + + expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY); + *expect.addr = 42; + + rcu_read_lock(); + test_free(expect.addr); + KUNIT_EXPECT_EQ(test, *expect.addr, (char)42); + /* + * Up to this point, memory should not have been freed yet, and + * therefore there should be no KFENCE report from the above access. + */ + rcu_read_unlock(); + + /* Above access to @expect.addr should not have generated a report! */ + KUNIT_EXPECT_FALSE(test, report_available()); + + /* Only after rcu_barrier() is the memory guaranteed to be freed. */ + rcu_barrier(); + + /* Expect use-after-free. */ + KUNIT_EXPECT_EQ(test, *expect.addr, (char)42); + KUNIT_EXPECT_TRUE(test, report_matches(&expect)); +} + +/* Test krealloc(). */ +static void test_krealloc(struct kunit *test) +{ + const size_t size = 32; + const struct expect_report expect = { + .type = KFENCE_ERROR_UAF, + .fn = test_krealloc, + .addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY), + .is_write = false, + }; + char *buf = expect.addr; + int i; + + KUNIT_EXPECT_FALSE(test, test_cache); + KUNIT_EXPECT_EQ(test, ksize(buf), size); /* Precise size match after KFENCE alloc. */ + for (i = 0; i < size; i++) + buf[i] = i + 1; + + /* Check that we successfully change the size. */ + buf = krealloc(buf, size * 3, GFP_KERNEL); /* Grow. */ + /* Note: Might no longer be a KFENCE alloc. */ + KUNIT_EXPECT_GE(test, ksize(buf), size * 3); + for (i = 0; i < size; i++) + KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1)); + for (; i < size * 3; i++) /* Fill to extra bytes. */ + buf[i] = i + 1; + + buf = krealloc(buf, size * 2, GFP_KERNEL); /* Shrink. */ + KUNIT_EXPECT_GE(test, ksize(buf), size * 2); + for (i = 0; i < size * 2; i++) + KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1)); + + buf = krealloc(buf, 0, GFP_KERNEL); /* Free. */ + KUNIT_EXPECT_EQ(test, (unsigned long)buf, (unsigned long)ZERO_SIZE_PTR); + KUNIT_ASSERT_FALSE(test, report_available()); /* No reports yet! */ + + READ_ONCE(*expect.addr); /* Ensure krealloc() actually freed earlier KFENCE object. */ + KUNIT_ASSERT_TRUE(test, report_matches(&expect)); +} + +/* Test that some objects from a bulk allocation belong to KFENCE pool. */ +static void test_memcache_alloc_bulk(struct kunit *test) +{ + const size_t size = 32; + bool pass = false; + unsigned long timeout; + + setup_test_cache(test, size, 0, NULL); + KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */ + /* + * 100x the sample interval should be more than enough to ensure we get + * a KFENCE allocation eventually. + */ + timeout = jiffies + msecs_to_jiffies(100 * CONFIG_KFENCE_SAMPLE_INTERVAL); + do { + void *objects[100]; + int i, num = kmem_cache_alloc_bulk(test_cache, GFP_ATOMIC, ARRAY_SIZE(objects), + objects); + if (!num) + continue; + for (i = 0; i < ARRAY_SIZE(objects); i++) { + if (is_kfence_address(objects[i])) { + pass = true; + break; + } + } + kmem_cache_free_bulk(test_cache, num, objects); + /* + * kmem_cache_alloc_bulk() disables interrupts, and calling it + * in a tight loop may not give KFENCE a chance to switch the + * static branch. Call cond_resched() to let KFENCE chime in. + */ + cond_resched(); + } while (!pass && time_before(jiffies, timeout)); + + KUNIT_EXPECT_TRUE(test, pass); + KUNIT_EXPECT_FALSE(test, report_available()); +} + +/* + * KUnit does not provide a way to provide arguments to tests, and we encode + * additional info in the name. Set up 2 tests per test case, one using the + * default allocator, and another using a custom memcache (suffix '-memcache'). 
+ */ +#define KFENCE_KUNIT_CASE(test_name) \ + { .run_case = test_name, .name = #test_name }, \ + { .run_case = test_name, .name = #test_name "-memcache" } + +static struct kunit_case kfence_test_cases[] = { + KFENCE_KUNIT_CASE(test_out_of_bounds_read), + KFENCE_KUNIT_CASE(test_out_of_bounds_write), + KFENCE_KUNIT_CASE(test_use_after_free_read), + KFENCE_KUNIT_CASE(test_double_free), + KFENCE_KUNIT_CASE(test_invalid_addr_free), + KFENCE_KUNIT_CASE(test_corruption), + KFENCE_KUNIT_CASE(test_free_bulk), + KFENCE_KUNIT_CASE(test_init_on_free), + KUNIT_CASE(test_kmalloc_aligned_oob_read), + KUNIT_CASE(test_kmalloc_aligned_oob_write), + KUNIT_CASE(test_shrink_memcache), + KUNIT_CASE(test_memcache_ctor), + KUNIT_CASE(test_invalid_access), + KUNIT_CASE(test_gfpzero), + KUNIT_CASE(test_memcache_typesafe_by_rcu), + KUNIT_CASE(test_krealloc), + KUNIT_CASE(test_memcache_alloc_bulk), + {}, +}; + +/* ===== End test cases ===== */ + +static int test_init(struct kunit *test) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&observed.lock, flags); + for (i = 0; i < ARRAY_SIZE(observed.lines); i++) + observed.lines[i][0] = '\0'; + observed.nlines = 0; + spin_unlock_irqrestore(&observed.lock, flags); + + /* Any test with 'memcache' in its name will want a memcache. */ + if (strstr(test->name, "memcache")) + test->priv = TEST_PRIV_WANT_MEMCACHE; + else + test->priv = NULL; + + return 0; +} + +static void test_exit(struct kunit *test) +{ + test_cache_destroy(); +} + +static struct kunit_suite kfence_test_suite = { + .name = "kfence", + .test_cases = kfence_test_cases, + .init = test_init, + .exit = test_exit, +}; +static struct kunit_suite *kfence_test_suites[] = { &kfence_test_suite, NULL }; + +static void register_tracepoints(struct tracepoint *tp, void *ignore) +{ + check_trace_callback_type_console(probe_console); + if (!strcmp(tp->name, "console")) + WARN_ON(tracepoint_probe_register(tp, probe_console, NULL)); +} + +static void unregister_tracepoints(struct tracepoint *tp, void *ignore) +{ + if (!strcmp(tp->name, "console")) + tracepoint_probe_unregister(tp, probe_console, NULL); +} + +/* + * We only want to do tracepoints setup and teardown once, therefore we have to + * customize the init and exit functions and cannot rely on kunit_test_suite(). + */ +static int __init kfence_test_init(void) +{ + /* + * Because we want to be able to build the test as a module, we need to + * iterate through all known tracepoints, since the static registration + * won't work here. + */ + for_each_kernel_tracepoint(register_tracepoints, NULL); + return __kunit_test_suites_init(kfence_test_suites); +} + +static void kfence_test_exit(void) +{ + __kunit_test_suites_exit(kfence_test_suites); + for_each_kernel_tracepoint(unregister_tracepoints, NULL); + tracepoint_synchronize_unregister(); +} + +late_initcall(kfence_test_init); +module_exit(kfence_test_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Alexander Potapenko , Marco Elver "); diff --git a/mm/kfence/report.c b/mm/kfence/report.c index 4dbfa9a382e4..901bd7ee83d8 100644 --- a/mm/kfence/report.c +++ b/mm/kfence/report.c @@ -156,7 +156,12 @@ static void print_diff_canary(unsigned long address, size_t bytes_to_show, pr_cont(" ]"); } -void kfence_report_error(unsigned long address, struct pt_regs *regs, +static const char *get_access_type(bool is_write) +{ + return is_write ? 
"write" : "read"; +} + +void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs, const struct kfence_metadata *meta, enum kfence_error_type type) { unsigned long stack_entries[KFENCE_STACK_DEPTH] = { 0 }; @@ -194,17 +199,19 @@ void kfence_report_error(unsigned long address, struct pt_regs *regs, case KFENCE_ERROR_OOB: { const bool left_of_object = address < meta->addr; - pr_err("BUG: KFENCE: out-of-bounds in %pS\n\n", (void *)stack_entries[skipnr]); - pr_err("Out-of-bounds access at 0x" PTR_FMT " (%luB %s of kfence-#%zd):\n", - (void *)address, + pr_err("BUG: KFENCE: out-of-bounds %s in %pS\n\n", get_access_type(is_write), + (void *)stack_entries[skipnr]); + pr_err("Out-of-bounds %s at 0x" PTR_FMT " (%luB %s of kfence-#%zd):\n", + get_access_type(is_write), (void *)address, left_of_object ? meta->addr - address : address - meta->addr, left_of_object ? "left" : "right", object_index); break; } case KFENCE_ERROR_UAF: - pr_err("BUG: KFENCE: use-after-free in %pS\n\n", (void *)stack_entries[skipnr]); - pr_err("Use-after-free access at 0x" PTR_FMT " (in kfence-#%zd):\n", - (void *)address, object_index); + pr_err("BUG: KFENCE: use-after-free %s in %pS\n\n", get_access_type(is_write), + (void *)stack_entries[skipnr]); + pr_err("Use-after-free %s at 0x" PTR_FMT " (in kfence-#%zd):\n", + get_access_type(is_write), (void *)address, object_index); break; case KFENCE_ERROR_CORRUPTION: pr_err("BUG: KFENCE: memory corruption in %pS\n\n", (void *)stack_entries[skipnr]); @@ -213,8 +220,10 @@ void kfence_report_error(unsigned long address, struct pt_regs *regs, pr_cont(" (in kfence-#%zd):\n", object_index); break; case KFENCE_ERROR_INVALID: - pr_err("BUG: KFENCE: invalid access in %pS\n\n", (void *)stack_entries[skipnr]); - pr_err("Invalid access at 0x" PTR_FMT ":\n", (void *)address); + pr_err("BUG: KFENCE: invalid %s in %pS\n\n", get_access_type(is_write), + (void *)stack_entries[skipnr]); + pr_err("Invalid %s at 0x" PTR_FMT ":\n", get_access_type(is_write), + (void *)address); break; case KFENCE_ERROR_INVALID_FREE: pr_err("BUG: KFENCE: invalid free in %pS\n\n", (void *)stack_entries[skipnr]); -- cgit v1.2.3-59-g8ed1b From 928501344fc645f80390afc12708c81b3595745d Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 25 Feb 2021 17:19:55 -0800 Subject: kasan, mm: don't save alloc stacks twice Patch series "kasan: optimizations and fixes for HW_TAGS", v4. This patchset makes the HW_TAGS mode more efficient, mostly by reworking poisoning approaches and simplifying/inlining some internal helpers. With this change, the overhead of HW_TAGS annotations excluding setting and checking memory tags is ~3%. The performance impact caused by tags will be unknown until we have hardware that supports MTE. As a side-effect, this patchset speeds up generic KASAN by ~15%. This patch (of 13): Currently KASAN saves allocation stacks in both kasan_slab_alloc() and kasan_kmalloc() annotations. This patch changes KASAN to save allocation stacks for slab objects from kmalloc caches in kasan_kmalloc() only, and stacks for other slab objects in kasan_slab_alloc() only. This change requires ____kasan_kmalloc() knowing whether the object belongs to a kmalloc cache. This is implemented by adding a flag field to the kasan_info structure. That flag is only set for kmalloc caches via a new kasan_cache_create_kmalloc() annotation. 
Link: https://lkml.kernel.org/r/cover.1612546384.git.andreyknvl@google.com Link: https://lkml.kernel.org/r/7c673ebca8d00f40a7ad6f04ab9a2bddeeae2097.1612546384.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Cc: Catalin Marinas Cc: Vincenzo Frascino Cc: Dmitry Vyukov Cc: Alexander Potapenko Cc: Will Deacon Cc: Andrey Ryabinin Cc: Peter Collingbourne Cc: Evgenii Stepanov Cc: Branislav Rankov Cc: Kevin Brodsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 9 +++++++++ mm/kasan/common.c | 18 ++++++++++++++---- mm/slab_common.c | 1 + 3 files changed, 24 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 7eaf2d9effb4..3fb31e8a353e 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -83,6 +83,7 @@ static inline void kasan_disable_current(void) {} struct kasan_cache { int alloc_meta_offset; int free_meta_offset; + bool is_kmalloc; }; #ifdef CONFIG_KASAN_HW_TAGS @@ -143,6 +144,13 @@ static __always_inline void kasan_cache_create(struct kmem_cache *cache, __kasan_cache_create(cache, size, flags); } +void __kasan_cache_create_kmalloc(struct kmem_cache *cache); +static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) +{ + if (kasan_enabled()) + __kasan_cache_create_kmalloc(cache); +} + size_t __kasan_metadata_size(struct kmem_cache *cache); static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache) { @@ -278,6 +286,7 @@ static inline void kasan_free_pages(struct page *page, unsigned int order) {} static inline void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, slab_flags_t *flags) {} +static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {} static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; } static inline void kasan_poison_slab(struct page *page) {} static inline void kasan_unpoison_object_data(struct kmem_cache *cache, diff --git a/mm/kasan/common.c b/mm/kasan/common.c index af1768c4fee5..d8d83ca56fe2 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -210,6 +210,11 @@ void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size, *size = optimal_size; } +void __kasan_cache_create_kmalloc(struct kmem_cache *cache) +{ + cache->kasan_info.is_kmalloc = true; +} + size_t __kasan_metadata_size(struct kmem_cache *cache) { if (!kasan_stack_collection_enabled()) @@ -394,17 +399,22 @@ void __kasan_slab_free_mempool(void *ptr, unsigned long ip) } } -static void set_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags) +static void set_alloc_info(struct kmem_cache *cache, void *object, + gfp_t flags, bool is_kmalloc) { struct kasan_alloc_meta *alloc_meta; + /* Don't save alloc info for kmalloc caches in kasan_slab_alloc(). 
*/ + if (cache->kasan_info.is_kmalloc && !is_kmalloc) + return; + alloc_meta = kasan_get_alloc_meta(cache, object); if (alloc_meta) kasan_set_track(&alloc_meta->alloc_track, flags); } static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object, - size_t size, gfp_t flags, bool keep_tag) + size_t size, gfp_t flags, bool is_kmalloc) { unsigned long redzone_start; unsigned long redzone_end; @@ -423,7 +433,7 @@ static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object, KASAN_GRANULE_SIZE); redzone_end = round_up((unsigned long)object + cache->object_size, KASAN_GRANULE_SIZE); - tag = assign_tag(cache, object, false, keep_tag); + tag = assign_tag(cache, object, false, is_kmalloc); /* Tag is ignored in set_tag without CONFIG_KASAN_SW/HW_TAGS */ kasan_unpoison(set_tag(object, tag), size); @@ -431,7 +441,7 @@ static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object, KASAN_KMALLOC_REDZONE); if (kasan_stack_collection_enabled()) - set_alloc_info(cache, (void *)object, flags); + set_alloc_info(cache, (void *)object, flags, is_kmalloc); return set_tag(object, tag); } diff --git a/mm/slab_common.c b/mm/slab_common.c index 284954ef1da5..897c3a446b04 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -643,6 +643,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, panic("Out of memory when creating slab %s\n", name); create_boot_cache(s, name, size, flags, useroffset, usersize); + kasan_cache_create_kmalloc(s); list_add(&s->list, &slab_caches); s->refcount = 1; return s; -- cgit v1.2.3-59-g8ed1b From 200072ce33b298cf14d3ed2a570f5eb27609677d Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 25 Feb 2021 17:20:11 -0800 Subject: kasan: unify large kfree checks Unify checks in kasan_kfree_large() and in kasan_slab_free_mempool() for large allocations as it's done for small kfree() allocations. With this change, kasan_slab_free_mempool() starts checking that the first byte of the memory that's being freed is accessible. 
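For reference, the consolidated check boils down to the following (a condensed sketch of ____kasan_kfree_large() as introduced by this patch, with a renamed wrapper; no behaviour beyond the patch is implied):

    /* A large (page_alloc-backed) free is invalid if either check trips. */
    static bool large_free_is_invalid_sketch(const void *ptr)
    {
            /* Must point at the start of the head page it was allocated from. */
            if (ptr != page_address(virt_to_head_page(ptr)))
                    return true;
            /* First byte already inaccessible, e.g. an earlier free poisoned it. */
            return !kasan_byte_accessible(ptr);
    }

Both kasan_kfree_large() and the !PageSlab() branch of kasan_slab_free_mempool() now report an invalid free whenever this sketch would return true.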
Link: https://lkml.kernel.org/r/14ffc4cd867e0b1ed58f7527e3b748a1b4ad08aa.1612546384.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Branislav Rankov Cc: Catalin Marinas Cc: Dmitry Vyukov Cc: Evgenii Stepanov Cc: Kevin Brodsky Cc: Peter Collingbourne Cc: Vincenzo Frascino Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kasan.h | 16 ++++++++-------- mm/kasan/common.c | 36 ++++++++++++++++++++++++++---------- 2 files changed, 34 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 3fb31e8a353e..b91732bd05d7 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -200,6 +200,13 @@ static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object) return false; } +void __kasan_kfree_large(void *ptr, unsigned long ip); +static __always_inline void kasan_kfree_large(void *ptr) +{ + if (kasan_enabled()) + __kasan_kfree_large(ptr, _RET_IP_); +} + void __kasan_slab_free_mempool(void *ptr, unsigned long ip); static __always_inline void kasan_slab_free_mempool(void *ptr) { @@ -247,13 +254,6 @@ static __always_inline void * __must_check kasan_krealloc(const void *object, return (void *)object; } -void __kasan_kfree_large(void *ptr, unsigned long ip); -static __always_inline void kasan_kfree_large(void *ptr) -{ - if (kasan_enabled()) - __kasan_kfree_large(ptr, _RET_IP_); -} - /* * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for * the hardware tag-based mode that doesn't rely on compiler instrumentation. @@ -302,6 +302,7 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object) { return false; } +static inline void kasan_kfree_large(void *ptr) {} static inline void kasan_slab_free_mempool(void *ptr) {} static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags) @@ -322,7 +323,6 @@ static inline void *kasan_krealloc(const void *object, size_t new_size, { return (void *)object; } -static inline void kasan_kfree_large(void *ptr) {} static inline bool kasan_check_byte(const void *address) { return true; diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 48d51daeda95..8a3d66393dc5 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -364,6 +364,31 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip) return ____kasan_slab_free(cache, object, ip, true); } +static bool ____kasan_kfree_large(void *ptr, unsigned long ip) +{ + if (ptr != page_address(virt_to_head_page(ptr))) { + kasan_report_invalid_free(ptr, ip); + return true; + } + + if (!kasan_byte_accessible(ptr)) { + kasan_report_invalid_free(ptr, ip); + return true; + } + + /* + * The object will be poisoned by kasan_free_pages() or + * kasan_slab_free_mempool(). + */ + + return false; +} + +void __kasan_kfree_large(void *ptr, unsigned long ip) +{ + ____kasan_kfree_large(ptr, ip); +} + void __kasan_slab_free_mempool(void *ptr, unsigned long ip) { struct page *page; @@ -377,10 +402,8 @@ void __kasan_slab_free_mempool(void *ptr, unsigned long ip) * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc. 
*/ if (unlikely(!PageSlab(page))) { - if (ptr != page_address(page)) { - kasan_report_invalid_free(ptr, ip); + if (____kasan_kfree_large(ptr, ip)) return; - } kasan_poison(ptr, page_size(page), KASAN_FREE_PAGE); } else { ____kasan_slab_free(page->slab_cache, ptr, ip, false); @@ -539,13 +562,6 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag return ____kasan_kmalloc(page->slab_cache, object, size, flags); } -void __kasan_kfree_large(void *ptr, unsigned long ip) -{ - if (ptr != page_address(virt_to_head_page(ptr))) - kasan_report_invalid_free(ptr, ip); - /* The object will be poisoned by kasan_free_pages(). */ -} - bool __kasan_check_byte(const void *address, unsigned long ip) { if (!kasan_byte_accessible(address)) { -- cgit v1.2.3-59-g8ed1b From df54714f579a77662054132161612ce3da876b0d Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Thu, 25 Feb 2021 17:20:56 -0800 Subject: include/linux: remove repeated words Drop the doubled word "for" in a comment. {firewire-cdev.h} Drop the doubled word "in" in a comment. {input.h} Drop the doubled word "a" in a comment. {mdev.h} Drop the doubled word "the" in a comment. {ptrace.h} Link: https://lkml.kernel.org/r/20210126232444.22861-1-rdunlap@infradead.org Signed-off-by: Randy Dunlap Cc: Stefan Richter Cc: Dmitry Torokhov Cc: Kirti Wankhede Cc: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mdev.h | 2 +- include/linux/ptrace.h | 2 +- include/uapi/linux/firewire-cdev.h | 2 +- include/uapi/linux/input.h | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mdev.h b/include/linux/mdev.h index 9004375c462e..27eb383cb95d 100644 --- a/include/linux/mdev.h +++ b/include/linux/mdev.h @@ -42,7 +42,7 @@ struct device *mdev_get_iommu_device(struct device *dev); * @mdev: mdev_device structure on of mediated device * that is being created * Returns integer: success (0) or error (< 0) - * @remove: Called to free resources in parent device's driver for a + * @remove: Called to free resources in parent device's driver for * a mediated device. It is mandatory to provide 'remove' * ops. * @mdev: mdev_device device structure which is being diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 2a9df80ea887..b5ebf6c01292 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -171,7 +171,7 @@ static inline void ptrace_event(int event, unsigned long message) * * Check whether @event is enabled and, if so, report @event and @pid * to the ptrace parent. @pid is reported as the pid_t seen from the - * the ptrace parent's pid namespace. + * ptrace parent's pid namespace. * * Called without locks. */ diff --git a/include/uapi/linux/firewire-cdev.h b/include/uapi/linux/firewire-cdev.h index 7e5b5c10a49c..5effa9832802 100644 --- a/include/uapi/linux/firewire-cdev.h +++ b/include/uapi/linux/firewire-cdev.h @@ -844,7 +844,7 @@ struct fw_cdev_queue_iso { * struct fw_cdev_start_iso - Start an isochronous transmission or reception * @cycle: Cycle in which to start I/O. If @cycle is greater than or * equal to 0, the I/O will start on that cycle. - * @sync: Determines the value to wait for for receive packets that have + * @sync: Determines the value to wait for receive packets that have * the %FW_CDEV_ISO_SYNC bit set * @tags: Tag filter bit mask. Only valid for isochronous reception. * Determines the tag values for which packets will be accepted. 
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h index 9a61c28ed3ae..ee3127461ee0 100644 --- a/include/uapi/linux/input.h +++ b/include/uapi/linux/input.h @@ -84,7 +84,7 @@ struct input_id { * in units per radian. * When INPUT_PROP_ACCELEROMETER is set the resolution changes. * The main axes (ABS_X, ABS_Y, ABS_Z) are then reported in - * in units per g (units/g) and in units per degree per second + * units per g (units/g) and in units per degree per second * (units/deg/s) for rotational axes (ABS_RX, ABS_RY, ABS_RZ). */ struct input_absinfo { -- cgit v1.2.3-59-g8ed1b From c131bd0b5448bb577b7a9ed48c4e528807e8d5af Mon Sep 17 00:00:00 2001 From: Miguel Ojeda Date: Thu, 25 Feb 2021 17:21:00 -0800 Subject: treewide: Miguel has moved Update contact info. Link: https://lkml.kernel.org/r/20210206162524.GA11520@kernel.org Signed-off-by: Miguel Ojeda Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- .mailmap | 1 + CREDITS | 9 +++------ Documentation/admin-guide/auxdisplay/cfag12864b.rst | 2 +- Documentation/admin-guide/auxdisplay/ks0108.rst | 2 +- MAINTAINERS | 12 ++++++------ drivers/auxdisplay/cfag12864b.c | 4 ++-- drivers/auxdisplay/cfag12864bfb.c | 4 ++-- drivers/auxdisplay/ks0108.c | 4 ++-- include/linux/cfag12864b.h | 2 +- include/linux/ks0108.h | 2 +- samples/auxdisplay/cfag12864b-example.c | 2 +- 11 files changed, 21 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/.mailmap b/.mailmap index 87a8bbdbf749..85b93cdefc87 100644 --- a/.mailmap +++ b/.mailmap @@ -237,6 +237,7 @@ Maxime Ripard Mayuresh Janorkar Michael Buesch Michel Dänzer +Miguel Ojeda Mike Rapoport Mike Rapoport Mike Rapoport diff --git a/CREDITS b/CREDITS index be097156bd71..cef83b958cbe 100644 --- a/CREDITS +++ b/CREDITS @@ -2841,14 +2841,11 @@ S: Subiaco, 6008 S: Perth, Western Australia S: Australia -N: Miguel Ojeda Sandonis -E: miguel.ojeda.sandonis@gmail.com -W: http://miguelojeda.es -W: http://jair.lab.fi.uva.es/~migojed/ +N: Miguel Ojeda +E: ojeda@kernel.org +W: https://ojeda.dev D: Author of the ks0108, cfag12864b and cfag12864bfb auxiliary display drivers. 
D: Maintainer of the auxiliary display drivers tree (drivers/auxdisplay/*) -S: C/ Mieses 20, 9-B -S: Valladolid 47009 S: Spain N: Peter Oruba diff --git a/Documentation/admin-guide/auxdisplay/cfag12864b.rst b/Documentation/admin-guide/auxdisplay/cfag12864b.rst index 18c2865bd322..da385d851acc 100644 --- a/Documentation/admin-guide/auxdisplay/cfag12864b.rst +++ b/Documentation/admin-guide/auxdisplay/cfag12864b.rst @@ -3,7 +3,7 @@ cfag12864b LCD Driver Documentation =================================== :License: GPLv2 -:Author & Maintainer: Miguel Ojeda Sandonis +:Author & Maintainer: Miguel Ojeda :Date: 2006-10-27 diff --git a/Documentation/admin-guide/auxdisplay/ks0108.rst b/Documentation/admin-guide/auxdisplay/ks0108.rst index c0b7faf73136..a7d3fe509373 100644 --- a/Documentation/admin-guide/auxdisplay/ks0108.rst +++ b/Documentation/admin-guide/auxdisplay/ks0108.rst @@ -3,7 +3,7 @@ ks0108 LCD Controller Driver Documentation ========================================== :License: GPLv2 -:Author & Maintainer: Miguel Ojeda Sandonis +:Author & Maintainer: Miguel Ojeda :Date: 2006-10-27 diff --git a/MAINTAINERS b/MAINTAINERS index 40040db747fc..e42082eccf36 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2982,7 +2982,7 @@ F: include/uapi/linux/audit.h F: kernel/audit* AUXILIARY DISPLAY DRIVERS -M: Miguel Ojeda Sandonis +M: Miguel Ojeda S: Maintained F: drivers/auxdisplay/ F: include/linux/cfag12864b.h @@ -4128,13 +4128,13 @@ F: scripts/extract-cert.c F: scripts/sign-file.c CFAG12864B LCD DRIVER -M: Miguel Ojeda Sandonis +M: Miguel Ojeda S: Maintained F: drivers/auxdisplay/cfag12864b.c F: include/linux/cfag12864b.h CFAG12864BFB LCD FRAMEBUFFER DRIVER -M: Miguel Ojeda Sandonis +M: Miguel Ojeda S: Maintained F: drivers/auxdisplay/cfag12864bfb.c F: include/linux/cfag12864b.h @@ -4304,7 +4304,7 @@ S: Supported F: drivers/infiniband/hw/usnic/ CLANG-FORMAT FILE -M: Miguel Ojeda +M: Miguel Ojeda S: Maintained F: .clang-format @@ -4444,7 +4444,7 @@ S: Maintained F: drivers/platform/x86/compal-laptop.c COMPILER ATTRIBUTES -M: Miguel Ojeda +M: Miguel Ojeda S: Maintained F: include/linux/compiler_attributes.h @@ -9939,7 +9939,7 @@ F: include/linux/kprobes.h F: kernel/kprobes.c KS0108 LCD CONTROLLER DRIVER -M: Miguel Ojeda Sandonis +M: Miguel Ojeda S: Maintained F: Documentation/admin-guide/auxdisplay/ks0108.rst F: drivers/auxdisplay/ks0108.c diff --git a/drivers/auxdisplay/cfag12864b.c b/drivers/auxdisplay/cfag12864b.c index 7eebae7e322c..fd430e6866a1 100644 --- a/drivers/auxdisplay/cfag12864b.c +++ b/drivers/auxdisplay/cfag12864b.c @@ -5,7 +5,7 @@ * Description: cfag12864b LCD driver * Depends: ks0108 * - * Author: Copyright (C) Miguel Ojeda Sandonis + * Author: Copyright (C) Miguel Ojeda * Date: 2006-10-31 */ @@ -376,5 +376,5 @@ module_init(cfag12864b_init); module_exit(cfag12864b_exit); MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Miguel Ojeda Sandonis "); +MODULE_AUTHOR("Miguel Ojeda "); MODULE_DESCRIPTION("cfag12864b LCD driver"); diff --git a/drivers/auxdisplay/cfag12864bfb.c b/drivers/auxdisplay/cfag12864bfb.c index 2002291ab338..d66821adf453 100644 --- a/drivers/auxdisplay/cfag12864bfb.c +++ b/drivers/auxdisplay/cfag12864bfb.c @@ -5,7 +5,7 @@ * Description: cfag12864b LCD framebuffer driver * Depends: cfag12864b * - * Author: Copyright (C) Miguel Ojeda Sandonis + * Author: Copyright (C) Miguel Ojeda * Date: 2006-10-31 */ @@ -171,5 +171,5 @@ module_init(cfag12864bfb_init); module_exit(cfag12864bfb_exit); MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Miguel Ojeda Sandonis "); +MODULE_AUTHOR("Miguel Ojeda "); 
MODULE_DESCRIPTION("cfag12864b LCD framebuffer driver"); diff --git a/drivers/auxdisplay/ks0108.c b/drivers/auxdisplay/ks0108.c index abfe3fa9e6f4..03c95ad4216c 100644 --- a/drivers/auxdisplay/ks0108.c +++ b/drivers/auxdisplay/ks0108.c @@ -5,7 +5,7 @@ * Description: ks0108 LCD Controller driver * Depends: parport * - * Author: Copyright (C) Miguel Ojeda Sandonis + * Author: Copyright (C) Miguel Ojeda * Date: 2006-10-31 */ @@ -182,6 +182,6 @@ module_init(ks0108_init); module_exit(ks0108_exit); MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Miguel Ojeda Sandonis "); +MODULE_AUTHOR("Miguel Ojeda "); MODULE_DESCRIPTION("ks0108 LCD Controller driver"); diff --git a/include/linux/cfag12864b.h b/include/linux/cfag12864b.h index 4060004968c8..6617d9c68d86 100644 --- a/include/linux/cfag12864b.h +++ b/include/linux/cfag12864b.h @@ -4,7 +4,7 @@ * Version: 0.1.0 * Description: cfag12864b LCD driver header * - * Author: Copyright (C) Miguel Ojeda Sandonis + * Author: Copyright (C) Miguel Ojeda * Date: 2006-10-12 */ diff --git a/include/linux/ks0108.h b/include/linux/ks0108.h index 0738389b42b6..1a37a664f915 100644 --- a/include/linux/ks0108.h +++ b/include/linux/ks0108.h @@ -4,7 +4,7 @@ * Version: 0.1.0 * Description: ks0108 LCD Controller driver header * - * Author: Copyright (C) Miguel Ojeda Sandonis + * Author: Copyright (C) Miguel Ojeda * Date: 2006-10-31 */ diff --git a/samples/auxdisplay/cfag12864b-example.c b/samples/auxdisplay/cfag12864b-example.c index bfeab44f81d0..2e3bb7375c99 100644 --- a/samples/auxdisplay/cfag12864b-example.c +++ b/samples/auxdisplay/cfag12864b-example.c @@ -4,7 +4,7 @@ * Version: 0.1.0 * Description: cfag12864b LCD userspace example program * - * Author: Copyright (C) Miguel Ojeda Sandonis + * Author: Copyright (C) Miguel Ojeda * Date: 2006-10-31 */ -- cgit v1.2.3-59-g8ed1b From c1f26493ed7f363c63e0e9d91e50d4db26df6603 Mon Sep 17 00:00:00 2001 From: Hubert Jasudowicz Date: Thu, 25 Feb 2021 17:21:03 -0800 Subject: groups: use flexible-array member in struct group_info Replace zero-size array with flexible array member, as recommended by the docs. Link: https://lkml.kernel.org/r/155995eed35c3c1bdcc56e69d8997c8e4c46740a.1611620846.git.hubert.jasudowicz@gmail.com Signed-off-by: Hubert Jasudowicz Cc: "Peter Zijlstra (Intel)" Cc: Micah Morton Cc: Gao Xiang Cc: Michael Kelley Cc: Thomas Cedeno Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/cred.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/cred.h b/include/linux/cred.h index 18639c069263..4c6350503697 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -25,7 +25,7 @@ struct inode; struct group_info { atomic_t usage; int ngroups; - kgid_t gid[0]; + kgid_t gid[]; } __randomize_layout; /** -- cgit v1.2.3-59-g8ed1b From a28a6e860c6cf231cf3c5171c75c342adcd00406 Mon Sep 17 00:00:00 2001 From: Francis Laniel Date: Thu, 25 Feb 2021 17:21:20 -0800 Subject: string.h: move fortified functions definitions in a dedicated header. This patch adds fortify-string.h to contain fortified functions definitions. Thus, the code is more separated and compile time is approximately 1% faster for people who do not set CONFIG_FORTIFY_SOURCE. 
Link: https://lkml.kernel.org/r/20210111092141.22946-1-laniel_francis@privacyrequired.com Link: https://lkml.kernel.org/r/20210111092141.22946-2-laniel_francis@privacyrequired.com Signed-off-by: Francis Laniel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/fortify-string.h | 302 +++++++++++++++++++++++++++++++++++++++++ include/linux/string.h | 282 +------------------------------------- 2 files changed, 303 insertions(+), 281 deletions(-) create mode 100644 include/linux/fortify-string.h (limited to 'include/linux') diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h new file mode 100644 index 000000000000..c1be37437e77 --- /dev/null +++ b/include/linux/fortify-string.h @@ -0,0 +1,302 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_FORTIFY_STRING_H_ +#define _LINUX_FORTIFY_STRING_H_ + + +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) +extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr); +extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp); +extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy); +extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove); +extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset); +extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat); +extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy); +extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen); +extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat); +extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy); +#else +#define __underlying_memchr __builtin_memchr +#define __underlying_memcmp __builtin_memcmp +#define __underlying_memcpy __builtin_memcpy +#define __underlying_memmove __builtin_memmove +#define __underlying_memset __builtin_memset +#define __underlying_strcat __builtin_strcat +#define __underlying_strcpy __builtin_strcpy +#define __underlying_strlen __builtin_strlen +#define __underlying_strncat __builtin_strncat +#define __underlying_strncpy __builtin_strncpy +#endif + +__FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size) +{ + size_t p_size = __builtin_object_size(p, 1); + + if (__builtin_constant_p(size) && p_size < size) + __write_overflow(); + if (p_size < size) + fortify_panic(__func__); + return __underlying_strncpy(p, q, size); +} + +__FORTIFY_INLINE char *strcat(char *p, const char *q) +{ + size_t p_size = __builtin_object_size(p, 1); + + if (p_size == (size_t)-1) + return __underlying_strcat(p, q); + if (strlcat(p, q, p_size) >= p_size) + fortify_panic(__func__); + return p; +} + +__FORTIFY_INLINE __kernel_size_t strlen(const char *p) +{ + __kernel_size_t ret; + size_t p_size = __builtin_object_size(p, 1); + + /* Work around gcc excess stack consumption issue */ + if (p_size == (size_t)-1 || + (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0')) + return __underlying_strlen(p); + ret = strnlen(p, p_size); + if (p_size <= ret) + fortify_panic(__func__); + return ret; +} + +extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen); +__FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen) +{ + size_t p_size = __builtin_object_size(p, 1); + __kernel_size_t ret = 
__real_strnlen(p, maxlen < p_size ? maxlen : p_size); + + if (p_size <= ret && maxlen != ret) + fortify_panic(__func__); + return ret; +} + +/* defined after fortified strlen to reuse it */ +extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy); +__FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size) +{ + size_t ret; + size_t p_size = __builtin_object_size(p, 1); + size_t q_size = __builtin_object_size(q, 1); + + if (p_size == (size_t)-1 && q_size == (size_t)-1) + return __real_strlcpy(p, q, size); + ret = strlen(q); + if (size) { + size_t len = (ret >= size) ? size - 1 : ret; + + if (__builtin_constant_p(len) && len >= p_size) + __write_overflow(); + if (len >= p_size) + fortify_panic(__func__); + __underlying_memcpy(p, q, len); + p[len] = '\0'; + } + return ret; +} + +/* defined after fortified strnlen to reuse it */ +extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(strscpy); +__FORTIFY_INLINE ssize_t strscpy(char *p, const char *q, size_t size) +{ + size_t len; + /* Use string size rather than possible enclosing struct size. */ + size_t p_size = __builtin_object_size(p, 1); + size_t q_size = __builtin_object_size(q, 1); + + /* If we cannot get size of p and q default to call strscpy. */ + if (p_size == (size_t) -1 && q_size == (size_t) -1) + return __real_strscpy(p, q, size); + + /* + * If size can be known at compile time and is greater than + * p_size, generate a compile time write overflow error. + */ + if (__builtin_constant_p(size) && size > p_size) + __write_overflow(); + + /* + * This call protects from read overflow, because len will default to q + * length if it smaller than size. + */ + len = strnlen(q, size); + /* + * If len equals size, we will copy only size bytes which leads to + * -E2BIG being returned. + * Otherwise we will copy len + 1 because of the final '\O'. + */ + len = len == size ? size : len + 1; + + /* + * Generate a runtime write overflow error if len is greater than + * p_size. + */ + if (len > p_size) + fortify_panic(__func__); + + /* + * We can now safely call vanilla strscpy because we are protected from: + * 1. Read overflow thanks to call to strnlen(). + * 2. Write overflow thanks to above ifs. 
+ */ + return __real_strscpy(p, q, len); +} + +/* defined after fortified strlen and strnlen to reuse them */ +__FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count) +{ + size_t p_len, copy_len; + size_t p_size = __builtin_object_size(p, 1); + size_t q_size = __builtin_object_size(q, 1); + + if (p_size == (size_t)-1 && q_size == (size_t)-1) + return __underlying_strncat(p, q, count); + p_len = strlen(p); + copy_len = strnlen(q, count); + if (p_size < p_len + copy_len + 1) + fortify_panic(__func__); + __underlying_memcpy(p + p_len, q, copy_len); + p[p_len + copy_len] = '\0'; + return p; +} + +__FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + + if (__builtin_constant_p(size) && p_size < size) + __write_overflow(); + if (p_size < size) + fortify_panic(__func__); + return __underlying_memset(p, c, size); +} + +__FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + size_t q_size = __builtin_object_size(q, 0); + + if (__builtin_constant_p(size)) { + if (p_size < size) + __write_overflow(); + if (q_size < size) + __read_overflow2(); + } + if (p_size < size || q_size < size) + fortify_panic(__func__); + return __underlying_memcpy(p, q, size); +} + +__FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + size_t q_size = __builtin_object_size(q, 0); + + if (__builtin_constant_p(size)) { + if (p_size < size) + __write_overflow(); + if (q_size < size) + __read_overflow2(); + } + if (p_size < size || q_size < size) + fortify_panic(__func__); + return __underlying_memmove(p, q, size); +} + +extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan); +__FORTIFY_INLINE void *memscan(void *p, int c, __kernel_size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + + if (__builtin_constant_p(size) && p_size < size) + __read_overflow(); + if (p_size < size) + fortify_panic(__func__); + return __real_memscan(p, c, size); +} + +__FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + size_t q_size = __builtin_object_size(q, 0); + + if (__builtin_constant_p(size)) { + if (p_size < size) + __read_overflow(); + if (q_size < size) + __read_overflow2(); + } + if (p_size < size || q_size < size) + fortify_panic(__func__); + return __underlying_memcmp(p, q, size); +} + +__FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + + if (__builtin_constant_p(size) && p_size < size) + __read_overflow(); + if (p_size < size) + fortify_panic(__func__); + return __underlying_memchr(p, c, size); +} + +void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv); +__FORTIFY_INLINE void *memchr_inv(const void *p, int c, size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + + if (__builtin_constant_p(size) && p_size < size) + __read_overflow(); + if (p_size < size) + fortify_panic(__func__); + return __real_memchr_inv(p, c, size); +} + +extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup); +__FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp) +{ + size_t p_size = __builtin_object_size(p, 0); + + if (__builtin_constant_p(size) && p_size < size) + __read_overflow(); + if (p_size < size) + fortify_panic(__func__); + return __real_kmemdup(p, size, gfp); +} + 
+/* defined after fortified strlen and memcpy to reuse them */ +__FORTIFY_INLINE char *strcpy(char *p, const char *q) +{ + size_t p_size = __builtin_object_size(p, 1); + size_t q_size = __builtin_object_size(q, 1); + size_t size; + + if (p_size == (size_t)-1 && q_size == (size_t)-1) + return __underlying_strcpy(p, q); + size = strlen(q) + 1; + /* test here to use the more stringent object size */ + if (p_size < size) + fortify_panic(__func__); + memcpy(p, q, size); + return p; +} + +/* Don't use these outside the FORITFY_SOURCE implementation */ +#undef __underlying_memchr +#undef __underlying_memcmp +#undef __underlying_memcpy +#undef __underlying_memmove +#undef __underlying_memset +#undef __underlying_strcat +#undef __underlying_strcpy +#undef __underlying_strlen +#undef __underlying_strncat +#undef __underlying_strncpy + +#endif /* _LINUX_FORTIFY_STRING_H_ */ diff --git a/include/linux/string.h b/include/linux/string.h index 4fcfb56abcf5..9521d8cab18e 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -266,287 +266,7 @@ void __read_overflow3(void) __compiletime_error("detected read beyond size of ob void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter"); #if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE) - -#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) -extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr); -extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp); -extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy); -extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove); -extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset); -extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat); -extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy); -extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen); -extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat); -extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy); -#else -#define __underlying_memchr __builtin_memchr -#define __underlying_memcmp __builtin_memcmp -#define __underlying_memcpy __builtin_memcpy -#define __underlying_memmove __builtin_memmove -#define __underlying_memset __builtin_memset -#define __underlying_strcat __builtin_strcat -#define __underlying_strcpy __builtin_strcpy -#define __underlying_strlen __builtin_strlen -#define __underlying_strncat __builtin_strncat -#define __underlying_strncpy __builtin_strncpy -#endif - -__FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size) -{ - size_t p_size = __builtin_object_size(p, 1); - if (__builtin_constant_p(size) && p_size < size) - __write_overflow(); - if (p_size < size) - fortify_panic(__func__); - return __underlying_strncpy(p, q, size); -} - -__FORTIFY_INLINE char *strcat(char *p, const char *q) -{ - size_t p_size = __builtin_object_size(p, 1); - if (p_size == (size_t)-1) - return __underlying_strcat(p, q); - if (strlcat(p, q, p_size) >= p_size) - fortify_panic(__func__); - return p; -} - -__FORTIFY_INLINE __kernel_size_t strlen(const char *p) -{ - __kernel_size_t ret; - size_t p_size = __builtin_object_size(p, 1); - - /* Work around gcc excess stack consumption issue */ - if (p_size 
== (size_t)-1 || - (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0')) - return __underlying_strlen(p); - ret = strnlen(p, p_size); - if (p_size <= ret) - fortify_panic(__func__); - return ret; -} - -extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen); -__FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen) -{ - size_t p_size = __builtin_object_size(p, 1); - __kernel_size_t ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size); - if (p_size <= ret && maxlen != ret) - fortify_panic(__func__); - return ret; -} - -/* defined after fortified strlen to reuse it */ -extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy); -__FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size) -{ - size_t ret; - size_t p_size = __builtin_object_size(p, 1); - size_t q_size = __builtin_object_size(q, 1); - if (p_size == (size_t)-1 && q_size == (size_t)-1) - return __real_strlcpy(p, q, size); - ret = strlen(q); - if (size) { - size_t len = (ret >= size) ? size - 1 : ret; - if (__builtin_constant_p(len) && len >= p_size) - __write_overflow(); - if (len >= p_size) - fortify_panic(__func__); - __underlying_memcpy(p, q, len); - p[len] = '\0'; - } - return ret; -} - -/* defined after fortified strnlen to reuse it */ -extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(strscpy); -__FORTIFY_INLINE ssize_t strscpy(char *p, const char *q, size_t size) -{ - size_t len; - /* Use string size rather than possible enclosing struct size. */ - size_t p_size = __builtin_object_size(p, 1); - size_t q_size = __builtin_object_size(q, 1); - - /* If we cannot get size of p and q default to call strscpy. */ - if (p_size == (size_t) -1 && q_size == (size_t) -1) - return __real_strscpy(p, q, size); - - /* - * If size can be known at compile time and is greater than - * p_size, generate a compile time write overflow error. - */ - if (__builtin_constant_p(size) && size > p_size) - __write_overflow(); - - /* - * This call protects from read overflow, because len will default to q - * length if it smaller than size. - */ - len = strnlen(q, size); - /* - * If len equals size, we will copy only size bytes which leads to - * -E2BIG being returned. - * Otherwise we will copy len + 1 because of the final '\O'. - */ - len = len == size ? size : len + 1; - - /* - * Generate a runtime write overflow error if len is greater than - * p_size. - */ - if (len > p_size) - fortify_panic(__func__); - - /* - * We can now safely call vanilla strscpy because we are protected from: - * 1. Read overflow thanks to call to strnlen(). - * 2. Write overflow thanks to above ifs. 
- */ - return __real_strscpy(p, q, len); -} - -/* defined after fortified strlen and strnlen to reuse them */ -__FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count) -{ - size_t p_len, copy_len; - size_t p_size = __builtin_object_size(p, 1); - size_t q_size = __builtin_object_size(q, 1); - if (p_size == (size_t)-1 && q_size == (size_t)-1) - return __underlying_strncat(p, q, count); - p_len = strlen(p); - copy_len = strnlen(q, count); - if (p_size < p_len + copy_len + 1) - fortify_panic(__func__); - __underlying_memcpy(p + p_len, q, copy_len); - p[p_len + copy_len] = '\0'; - return p; -} - -__FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size) -{ - size_t p_size = __builtin_object_size(p, 0); - if (__builtin_constant_p(size) && p_size < size) - __write_overflow(); - if (p_size < size) - fortify_panic(__func__); - return __underlying_memset(p, c, size); -} - -__FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size) -{ - size_t p_size = __builtin_object_size(p, 0); - size_t q_size = __builtin_object_size(q, 0); - if (__builtin_constant_p(size)) { - if (p_size < size) - __write_overflow(); - if (q_size < size) - __read_overflow2(); - } - if (p_size < size || q_size < size) - fortify_panic(__func__); - return __underlying_memcpy(p, q, size); -} - -__FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size) -{ - size_t p_size = __builtin_object_size(p, 0); - size_t q_size = __builtin_object_size(q, 0); - if (__builtin_constant_p(size)) { - if (p_size < size) - __write_overflow(); - if (q_size < size) - __read_overflow2(); - } - if (p_size < size || q_size < size) - fortify_panic(__func__); - return __underlying_memmove(p, q, size); -} - -extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan); -__FORTIFY_INLINE void *memscan(void *p, int c, __kernel_size_t size) -{ - size_t p_size = __builtin_object_size(p, 0); - if (__builtin_constant_p(size) && p_size < size) - __read_overflow(); - if (p_size < size) - fortify_panic(__func__); - return __real_memscan(p, c, size); -} - -__FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size) -{ - size_t p_size = __builtin_object_size(p, 0); - size_t q_size = __builtin_object_size(q, 0); - if (__builtin_constant_p(size)) { - if (p_size < size) - __read_overflow(); - if (q_size < size) - __read_overflow2(); - } - if (p_size < size || q_size < size) - fortify_panic(__func__); - return __underlying_memcmp(p, q, size); -} - -__FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size) -{ - size_t p_size = __builtin_object_size(p, 0); - if (__builtin_constant_p(size) && p_size < size) - __read_overflow(); - if (p_size < size) - fortify_panic(__func__); - return __underlying_memchr(p, c, size); -} - -void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv); -__FORTIFY_INLINE void *memchr_inv(const void *p, int c, size_t size) -{ - size_t p_size = __builtin_object_size(p, 0); - if (__builtin_constant_p(size) && p_size < size) - __read_overflow(); - if (p_size < size) - fortify_panic(__func__); - return __real_memchr_inv(p, c, size); -} - -extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup); -__FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp) -{ - size_t p_size = __builtin_object_size(p, 0); - if (__builtin_constant_p(size) && p_size < size) - __read_overflow(); - if (p_size < size) - fortify_panic(__func__); - return __real_kmemdup(p, size, gfp); -} - -/* defined after 
fortified strlen and memcpy to reuse them */ -__FORTIFY_INLINE char *strcpy(char *p, const char *q) -{ - size_t p_size = __builtin_object_size(p, 1); - size_t q_size = __builtin_object_size(q, 1); - size_t size; - if (p_size == (size_t)-1 && q_size == (size_t)-1) - return __underlying_strcpy(p, q); - size = strlen(q) + 1; - /* test here to use the more stringent object size */ - if (p_size < size) - fortify_panic(__func__); - memcpy(p, q, size); - return p; -} - -/* Don't use these outside the FORITFY_SOURCE implementation */ -#undef __underlying_memchr -#undef __underlying_memcmp -#undef __underlying_memcpy -#undef __underlying_memmove -#undef __underlying_memset -#undef __underlying_strcat -#undef __underlying_strcpy -#undef __underlying_strlen -#undef __underlying_strncat -#undef __underlying_strncpy +#include #endif /** -- cgit v1.2.3-59-g8ed1b From e1fdc403349c64fa58f4c163f4bf9b860b4db808 Mon Sep 17 00:00:00 2001 From: Vijayanand Jitta Date: Thu, 25 Feb 2021 17:21:27 -0800 Subject: lib: stackdepot: add support to disable stack depot Add a kernel parameter stack_depot_disable to disable stack depot. So that stack hash table doesn't consume any memory when stack depot is disabled. The use case is CONFIG_PAGE_OWNER without page_owner=on. Without this patch, stackdepot will consume the memory for the hashtable. By default, it's 8M which is never trivial. With this option, in CONFIG_PAGE_OWNER configured system, page_owner=off, stack_depot_disable in kernel command line, we could save the wasted memory for the hashtable. [akpm@linux-foundation.org: fix CONFIG_STACKDEPOT=n build] Link: https://lkml.kernel.org/r/1611749198-24316-2-git-send-email-vjitta@codeaurora.org Signed-off-by: Vinayak Menon Signed-off-by: Vijayanand Jitta Cc: Alexander Potapenko Cc: Minchan Kim Cc: Yogesh Lal Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/admin-guide/kernel-parameters.txt | 6 +++++ include/linux/stackdepot.h | 9 +++++++ init/main.c | 2 ++ lib/stackdepot.c | 32 +++++++++++++++++++++---- 4 files changed, 45 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index bab6a8b01202..04545725f187 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -5182,6 +5182,12 @@ growing up) the main stack are reserved for no other mapping. Default value is 256 pages. + stack_depot_disable= [KNL] + Setting this to true through kernel command line will + disable the stack depot thereby saving the static memory + consumed by the stack hash table. By default this is set + to false. + stacktrace [FTRACE] Enabled the stack tracer on boot up. 
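For context on what that hash table holds: depot users such as page_owner and KASAN capture a raw stack trace, hand it to the depot for deduplication, and keep only the small handle they get back; the table exists purely to find identical traces again. A rough sketch of a typical consumer is below; record_current_stack() and print_recorded_stack() are made-up helper names, the 16-entry buffer and skip count are arbitrary choices for the example, and the stack_trace_* and stack_depot_* calls are the existing kernel APIs:

#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/types.h>

static depot_stack_handle_t record_current_stack(gfp_t flags)
{
	unsigned long entries[16];
	unsigned int nr_entries;

	/* Capture the current call chain, skipping these helper frames. */
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);

	/* Identical traces hash to the same bucket and share one record. */
	return stack_depot_save(entries, nr_entries, flags);
}

static void print_recorded_stack(depot_stack_handle_t handle)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(handle, &entries);
	stack_trace_print(entries, nr_entries, 0);
}

With stack_depot_disable on the command line, stack_depot_save() bails out early (see the fast_exit path below), so the hash table would only be wasted memory; that is why this patch makes its allocation optional.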
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h index 24d49c732341..6bb4bc1a5f54 100644 --- a/include/linux/stackdepot.h +++ b/include/linux/stackdepot.h @@ -21,4 +21,13 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle, unsigned int filter_irq_stacks(unsigned long *entries, unsigned int nr_entries); +#ifdef CONFIG_STACKDEPOT +int stack_depot_init(void); +#else +static inline int stack_depot_init(void) +{ + return 0; +} +#endif /* CONFIG_STACKDEPOT */ + #endif diff --git a/init/main.c b/init/main.c index 261051070e3c..3648c9f94882 100644 --- a/init/main.c +++ b/init/main.c @@ -97,6 +97,7 @@ #include #include #include +#include #include #include @@ -827,6 +828,7 @@ static void __init mm_init(void) init_mem_debugging_and_hardening(); kfence_alloc_pool(); report_meminit(); + stack_depot_init(); mem_init(); /* page_owner must be initialized after buddy is ready */ page_ext_init_flatmem_late(); diff --git a/lib/stackdepot.c b/lib/stackdepot.c index 4b9715470e87..cc21116512a7 100644 --- a/lib/stackdepot.c +++ b/lib/stackdepot.c @@ -31,6 +31,7 @@ #include #include #include +#include #define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8) @@ -145,9 +146,32 @@ static struct stack_record *depot_alloc_stack(unsigned long *entries, int size, #define STACK_HASH_MASK (STACK_HASH_SIZE - 1) #define STACK_HASH_SEED 0x9747b28c -static struct stack_record *stack_table[STACK_HASH_SIZE] = { - [0 ... STACK_HASH_SIZE - 1] = NULL -}; +static bool stack_depot_disable; +static struct stack_record **stack_table; + +static int __init is_stack_depot_disabled(char *str) +{ + kstrtobool(str, &stack_depot_disable); + if (stack_depot_disable) { + pr_info("Stack Depot is disabled\n"); + stack_table = NULL; + } + return 0; +} +early_param("stack_depot_disable", is_stack_depot_disabled); + +int __init stack_depot_init(void) +{ + if (!stack_depot_disable) { + size_t size = (STACK_HASH_SIZE * sizeof(struct stack_record *)); + int i; + + stack_table = memblock_alloc(size, size); + for (i = 0; i < STACK_HASH_SIZE; i++) + stack_table[i] = NULL; + } + return 0; +} /* Calculate hash for a stack */ static inline u32 hash_stack(unsigned long *entries, unsigned int size) @@ -241,7 +265,7 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries, unsigned long flags; u32 hash; - if (unlikely(nr_entries == 0)) + if (unlikely(nr_entries == 0) || stack_depot_disable) goto fast_exit; hash = hash_stack(entries, nr_entries); -- cgit v1.2.3-59-g8ed1b From 4945cca232ce8bc699b8743f2436af664c471b96 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 25 Feb 2021 17:21:37 -0800 Subject: include/linux/bitops.h: spelling s/synomyn/synonym/ Fix a misspelling of "synonym". Link: https://lkml.kernel.org/r/20210108105305.2028120-1-geert+renesas@glider.be Signed-off-by: Geert Uytterhoeven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/bitops.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/bitops.h b/include/linux/bitops.h index a61f192c096b..a5a48303b0f1 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -214,7 +214,7 @@ static inline int get_count_order_long(unsigned long l) * __ffs64 - find first set bit in a 64 bit word * @word: The 64 bit word * - * On 64 bit arches this is a synomyn for __ffs + * On 64 bit arches this is a synonym for __ffs * The result is not defined if no bits are set, so check that @word * is non-zero before calling this. 
*/ -- cgit v1.2.3-59-g8ed1b From a5a673f7312253a842f3da8c60c980461cc269ec Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 25 Feb 2021 17:22:15 -0800 Subject: init: clean up early_param_on_off() macro Use early_param() to define early_param_on_off(). Link: https://lkml.kernel.org/r/20210201041532.4025025-1-masahiroy@kernel.org Signed-off-by: Masahiro Yamada Reviewed-by: Johan Hovold Reviewed-by: Miguel Ojeda Cc: Masahiro Yamada Cc: Joe Perches Cc: Nick Desaulniers Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/init.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/init.h b/include/linux/init.h index a01f01c1a5c5..31f54de58429 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -338,14 +338,14 @@ struct obs_kernel_param { var = 1; \ return 0; \ } \ - __setup_param(str_on, parse_##var##_on, parse_##var##_on, 1); \ + early_param(str_on, parse_##var##_on); \ \ static int __init parse_##var##_off(char *arg) \ { \ var = 0; \ return 0; \ } \ - __setup_param(str_off, parse_##var##_off, parse_##var##_off, 1) + early_param(str_off, parse_##var##_off) /* Relies on boot_command_line being set */ void __init parse_early_param(void); -- cgit v1.2.3-59-g8ed1b From d54ce6158e354f5358a547b96299ecd7f3725393 Mon Sep 17 00:00:00 2001 From: Sumit Garg Date: Thu, 25 Feb 2021 17:22:38 -0800 Subject: kgdb: fix to kill breakpoints on initmem after boot Currently breakpoints in kernel .init.text section are not handled correctly while allowing to remove them even after corresponding pages have been freed. Fix it via killing .init.text section breakpoints just prior to initmem pages being freed. Doug: "HW breakpoints aren't handled by this patch but it's probably not such a big deal". Link: https://lkml.kernel.org/r/20210224081652.587785-1-sumit.garg@linaro.org Signed-off-by: Sumit Garg Suggested-by: Doug Anderson Acked-by: Doug Anderson Acked-by: Daniel Thompson Tested-by: Daniel Thompson Cc: Masami Hiramatsu Cc: Steven Rostedt (VMware) Cc: Jason Wessel Cc: Peter Zijlstra Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kgdb.h | 2 ++ init/main.c | 1 + kernel/debug/debug_core.c | 11 +++++++++++ 3 files changed, 14 insertions(+) (limited to 'include/linux') diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h index 0444b44bd156..392a3670944c 100644 --- a/include/linux/kgdb.h +++ b/include/linux/kgdb.h @@ -359,9 +359,11 @@ extern atomic_t kgdb_active; extern bool dbg_is_early; extern void __init dbg_late_init(void); extern void kgdb_panic(const char *msg); +extern void kgdb_free_init_mem(void); #else /* ! CONFIG_KGDB */ #define in_dbg_master() (0) #define dbg_late_init() static inline void kgdb_panic(const char *msg) {} +static inline void kgdb_free_init_mem(void) { } #endif /* ! CONFIG_KGDB */ #endif /* _KGDB_H_ */ diff --git a/init/main.c b/init/main.c index 3648c9f94882..53b278845b88 100644 --- a/init/main.c +++ b/init/main.c @@ -1426,6 +1426,7 @@ static int __ref kernel_init(void *unused) async_synchronize_full(); kprobe_free_init_mem(); ftrace_free_init_mem(); + kgdb_free_init_mem(); free_initmem(); mark_readonly(); diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index b636d517c02c..4708aec492df 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -455,6 +455,17 @@ setundefined: return 0; } +void kgdb_free_init_mem(void) +{ + int i; + + /* Clear init memory breakpoints. 
*/ + for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { + if (init_section_contains((void *)kgdb_break[i].bpt_addr, 0)) + kgdb_break[i].state = BP_UNDEFINED; + } +} + #ifdef CONFIG_KGDB_KDB void kdb_dump_stack_on_cpu(int cpu) { -- cgit v1.2.3-59-g8ed1b From f685a533a7fab35c5d069dcd663f59c8e4171a75 Mon Sep 17 00:00:00 2001 From: Huang Pei Date: Thu, 25 Feb 2021 17:22:49 -0800 Subject: MIPS: make userspace mapping young by default MIPS page fault path (except huge page) takes 3 exceptions (1 TLB Miss + 2 TLB Invalid), but the second TLB Invalid exception is just triggered by __update_tlb from do_page_fault writing tlb without _PAGE_VALID set. With this patch, user space mapping prot is made young by default (with both _PAGE_VALID and _PAGE_YOUNG set), and it only takes 1 TLB Miss + 1 TLB Invalid exception. Remove pte_sw_mkyoung without polluting MM code and make the page fault delay of MIPS on par with other architectures. Link: https://lkml.kernel.org/r/20210204013942.8398-1-huangpei@loongson.cn Signed-off-by: Huang Pei Reviewed-by: Nicholas Piggin Acked-by: Acked-by: Thomas Bogendoerfer Cc: Christophe Leroy Cc: Cc: Bibo Mao Cc: Jiaxun Yang Cc: Paul Burton Cc: Li Xuefeng Cc: Yang Tiezhu Cc: Gao Juxin Cc: Fuxin Zhang Cc: Huacai Chen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/mips/mm/cache.c | 30 ++++++++++++++++-------------- include/linux/pgtable.h | 8 -------- mm/memory.c | 4 ---- 3 files changed, 16 insertions(+), 26 deletions(-) (limited to 'include/linux') diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 1754498b0717..7719d632df8d 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -157,29 +157,31 @@ unsigned long _page_cachable_default; EXPORT_SYMBOL(_page_cachable_default); #define PM(p) __pgprot(_page_cachable_default | (p)) +#define PVA(p) PM(_PAGE_VALID | _PAGE_ACCESSED | (p)) static inline void setup_protection_map(void) { protection_map[0] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); - protection_map[1] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC); - protection_map[2] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); - protection_map[3] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC); - protection_map[4] = PM(_PAGE_PRESENT); - protection_map[5] = PM(_PAGE_PRESENT); - protection_map[6] = PM(_PAGE_PRESENT); - protection_map[7] = PM(_PAGE_PRESENT); + protection_map[1] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC); + protection_map[2] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); + protection_map[3] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC); + protection_map[4] = PVA(_PAGE_PRESENT); + protection_map[5] = PVA(_PAGE_PRESENT); + protection_map[6] = PVA(_PAGE_PRESENT); + protection_map[7] = PVA(_PAGE_PRESENT); protection_map[8] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); - protection_map[9] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC); - protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | + protection_map[9] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC); + protection_map[10] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ); - protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE); - protection_map[12] = PM(_PAGE_PRESENT); - protection_map[13] = PM(_PAGE_PRESENT); - protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE); - protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE); + protection_map[11] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE); + protection_map[12] = PVA(_PAGE_PRESENT); + protection_map[13] = PVA(_PAGE_PRESENT); + protection_map[14] = PVA(_PAGE_PRESENT); + protection_map[15] = PVA(_PAGE_PRESENT); } +#undef _PVA
#undef PM void cpu_cache_init(void) diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 36eb748f3c97..cdfc4e9f253e 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -432,14 +432,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres * To be differentiate with macro pte_mkyoung, this macro is used on platforms * where software maintains page access bit. */ -#ifndef pte_sw_mkyoung -static inline pte_t pte_sw_mkyoung(pte_t pte) -{ - return pte; -} -#define pte_sw_mkyoung pte_sw_mkyoung -#endif - #ifndef pte_savedwrite #define pte_savedwrite pte_write #endif diff --git a/mm/memory.c b/mm/memory.c index 784249f3307b..c8e357627318 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2902,7 +2902,6 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) } flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); entry = mk_pte(new_page, vma->vm_page_prot); - entry = pte_sw_mkyoung(entry); entry = maybe_mkwrite(pte_mkdirty(entry), vma); /* @@ -3560,7 +3559,6 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) __SetPageUptodate(page); entry = mk_pte(page, vma->vm_page_prot); - entry = pte_sw_mkyoung(entry); if (vma->vm_flags & VM_WRITE) entry = pte_mkwrite(pte_mkdirty(entry)); @@ -3745,8 +3743,6 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr) if (prefault && arch_wants_old_prefaulted_pte()) entry = pte_mkold(entry); - else - entry = pte_sw_mkyoung(entry); if (write) entry = maybe_mkwrite(pte_mkdirty(entry), vma); -- cgit v1.2.3-59-g8ed1b
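The effect of the cache.c change above is easiest to see at the protection-map level: every user mapping's default protection now carries the valid and accessed ("young") bits, so the PTE written by the first fault can be used immediately instead of costing a second TLB Invalid exception just to mark it valid. A small standalone sketch with made-up bit values (the real MIPS _PAGE_* layout differs) shows what the PVA() wrapper adds on top of PM():

#include <stdio.h>

/* Illustrative bit values only; not the real MIPS _PAGE_* encoding. */
#define _PAGE_PRESENT	0x01UL
#define _PAGE_WRITE	0x02UL
#define _PAGE_ACCESSED	0x04UL	/* "young" */
#define _PAGE_VALID	0x08UL

#define PM(p)	((unsigned long)(p))			/* old default prot */
#define PVA(p)	PM(_PAGE_VALID | _PAGE_ACCESSED | (p))	/* new default prot */

int main(void)
{
	unsigned long old_prot = PM(_PAGE_PRESENT | _PAGE_WRITE);
	unsigned long new_prot = PVA(_PAGE_PRESENT | _PAGE_WRITE);

	/*
	 * old_prot lacks _PAGE_VALID, so the first PTE installed for such a
	 * mapping forces one more exception before the access can proceed;
	 * new_prot is valid and young from the start.
	 */
	printf("old prot %#lx -> new prot %#lx\n", old_prot, new_prot);
	return 0;
}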