From c0054c565ae598073d6c27762c7d4f7de49a45d9 Mon Sep 17 00:00:00 2001
From: Andrey Konovalov
Date: Tue, 22 Dec 2020 12:02:52 -0800
Subject: kasan: inline kasan_reset_tag for tag-based modes

Using kasan_reset_tag() currently results in a function call. As it's
called quite often from the allocator code, this leads to a noticeable
slowdown. Move it to include/linux/kasan.h and turn it into a static
inline function. Also remove the now unneeded reset_tag() internal KASAN
macro and use kasan_reset_tag() instead.

Link: https://lkml.kernel.org/r/6940383a3a9dfb416134d338d8fac97a9ebb8686.1606162397.git.andreyknvl@google.com
Link: https://linux-review.googlesource.com/id/I4d2061acfe91d480a75df00b07c22d8494ef14b5
Signed-off-by: Andrey Konovalov
Reviewed-by: Marco Elver
Reviewed-by: Dmitry Vyukov
Tested-by: Vincenzo Frascino
Cc: Alexander Potapenko
Cc: Andrey Ryabinin
Cc: Branislav Rankov
Cc: Catalin Marinas
Cc: Evgenii Stepanov
Cc: Kevin Brodsky
Cc: Vasily Gorbik
Cc: Will Deacon
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/kasan/common.c         | 6 +++---
 mm/kasan/hw_tags.c        | 9 ++-------
 mm/kasan/kasan.h          | 4 ----
 mm/kasan/report.c         | 4 ++--
 mm/kasan/report_hw_tags.c | 2 +-
 mm/kasan/report_sw_tags.c | 4 ++--
 mm/kasan/shadow.c         | 4 ++--
 mm/kasan/sw_tags.c        | 9 ++-------
 8 files changed, 14 insertions(+), 28 deletions(-)

(limited to 'mm')

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index b71dfe7c5059..780ec27459ab 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -179,14 +179,14 @@ size_t kasan_metadata_size(struct kmem_cache *cache)
 struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
 					      const void *object)
 {
-	return (void *)reset_tag(object) + cache->kasan_info.alloc_meta_offset;
+	return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset;
 }
 
 struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
 					    const void *object)
 {
 	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
-	return (void *)reset_tag(object) + cache->kasan_info.free_meta_offset;
+	return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
 }
 
 void kasan_poison_slab(struct page *page)
@@ -283,7 +283,7 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
 
 	tag = get_tag(object);
 	tagged_object = object;
-	object = reset_tag(object);
+	object = kasan_reset_tag(object);
 
 	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
 	    object)) {
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index cb849c8da978..227599e54e8e 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -30,20 +30,15 @@ void __init kasan_init_hw_tags(void)
 	pr_info("KernelAddressSanitizer initialized\n");
 }
 
-void *kasan_reset_tag(const void *addr)
-{
-	return reset_tag(addr);
-}
-
 void poison_range(const void *address, size_t size, u8 value)
 {
-	hw_set_mem_tag_range(reset_tag(address),
+	hw_set_mem_tag_range(kasan_reset_tag(address),
 			round_up(size, KASAN_GRANULE_SIZE), value);
 }
 
 void unpoison_range(const void *address, size_t size)
 {
-	hw_set_mem_tag_range(reset_tag(address),
+	hw_set_mem_tag_range(kasan_reset_tag(address),
 			round_up(size, KASAN_GRANULE_SIZE), get_tag(address));
 }
 
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 0eab7e4cecb8..5e8cd2080369 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -248,15 +248,11 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
 	return addr;
 }
 #endif
-#ifndef arch_kasan_reset_tag
-#define arch_kasan_reset_tag(addr)	((void *)(addr))
-#endif
 #ifndef arch_kasan_get_tag
 #define arch_kasan_get_tag(addr)	0
 #endif
 
 #define set_tag(addr, tag)	((void *)arch_kasan_set_tag((addr), (tag)))
-#define reset_tag(addr)		((void *)arch_kasan_reset_tag(addr))
 #define get_tag(addr)		arch_kasan_get_tag(addr)
 
 #ifdef CONFIG_KASAN_HW_TAGS
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 85ce2cb2cd2b..00c590efdaea 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -328,7 +328,7 @@ void kasan_report_invalid_free(void *object, unsigned long ip)
 	unsigned long flags;
 	u8 tag = get_tag(object);
 
-	object = reset_tag(object);
+	object = kasan_reset_tag(object);
 
 #if IS_ENABLED(CONFIG_KUNIT)
 	if (current->kunit_test)
@@ -361,7 +361,7 @@ static void __kasan_report(unsigned long addr, size_t size, bool is_write,
 	disable_trace_on_warning();
 
 	tagged_addr = (void *)addr;
-	untagged_addr = reset_tag(tagged_addr);
+	untagged_addr = kasan_reset_tag(tagged_addr);
 
 	info.access_addr = tagged_addr;
 	if (addr_has_metadata(untagged_addr))
diff --git a/mm/kasan/report_hw_tags.c b/mm/kasan/report_hw_tags.c
index da543eb832cd..57114f0e14d1 100644
--- a/mm/kasan/report_hw_tags.c
+++ b/mm/kasan/report_hw_tags.c
@@ -22,7 +22,7 @@ const char *get_bug_type(struct kasan_access_info *info)
 
 void *find_first_bad_addr(void *addr, size_t size)
 {
-	return reset_tag(addr);
+	return kasan_reset_tag(addr);
 }
 
 void metadata_fetch_row(char *buffer, void *row)
diff --git a/mm/kasan/report_sw_tags.c b/mm/kasan/report_sw_tags.c
index 317100fd95b9..7604b46239d4 100644
--- a/mm/kasan/report_sw_tags.c
+++ b/mm/kasan/report_sw_tags.c
@@ -41,7 +41,7 @@ const char *get_bug_type(struct kasan_access_info *info)
 	int i;
 
 	tag = get_tag(info->access_addr);
-	addr = reset_tag(info->access_addr);
+	addr = kasan_reset_tag(info->access_addr);
 	page = kasan_addr_to_page(addr);
 	if (page && PageSlab(page)) {
 		cache = page->slab_cache;
@@ -72,7 +72,7 @@ const char *get_bug_type(struct kasan_access_info *info)
 void *find_first_bad_addr(void *addr, size_t size)
 {
 	u8 tag = get_tag(addr);
-	void *p = reset_tag(addr);
+	void *p = kasan_reset_tag(addr);
 	void *end = p + size;
 
 	while (p < end && tag == *(u8 *)kasan_mem_to_shadow(p))
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index ac6a5f57df33..44a2b748f9d3 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -81,7 +81,7 @@ void poison_range(const void *address, size_t size, u8 value)
 	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
 	 * addresses to this function.
 	 */
-	address = reset_tag(address);
+	address = kasan_reset_tag(address);
 
 	shadow_start = kasan_mem_to_shadow(address);
 	shadow_end = kasan_mem_to_shadow(address + size);
@@ -98,7 +98,7 @@ void unpoison_range(const void *address, size_t size)
 	 * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
 	 * addresses to this function.
 	 */
-	address = reset_tag(address);
+	address = kasan_reset_tag(address);
 
 	poison_range(address, size, tag);
 
diff --git a/mm/kasan/sw_tags.c b/mm/kasan/sw_tags.c
index 6d7648cc3b98..e17de2619bbf 100644
--- a/mm/kasan/sw_tags.c
+++ b/mm/kasan/sw_tags.c
@@ -67,11 +67,6 @@ u8 random_tag(void)
 	return (u8)(state % (KASAN_TAG_MAX + 1));
 }
 
-void *kasan_reset_tag(const void *addr)
-{
-	return reset_tag(addr);
-}
-
 bool check_memory_region(unsigned long addr, size_t size, bool write,
 				unsigned long ret_ip)
 {
@@ -107,7 +102,7 @@ bool check_memory_region(unsigned long addr, size_t size, bool write,
 	if (tag == KASAN_TAG_KERNEL)
 		return true;
 
-	untagged_addr = reset_tag((const void *)addr);
+	untagged_addr = kasan_reset_tag((const void *)addr);
 	if (unlikely(untagged_addr <
 			kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
 		return !kasan_report(addr, size, write, ret_ip);
@@ -126,7 +121,7 @@ bool check_memory_region(unsigned long addr, size_t size, bool write,
 bool check_invalid_free(void *addr)
 {
 	u8 tag = get_tag(addr);
-	u8 shadow_byte = READ_ONCE(*(u8 *)kasan_mem_to_shadow(reset_tag(addr)));
+	u8 shadow_byte = READ_ONCE(*(u8 *)kasan_mem_to_shadow(kasan_reset_tag(addr)));
 
 	return (shadow_byte == KASAN_TAG_INVALID) ||
 		(tag != KASAN_TAG_KERNEL && tag != shadow_byte);
--
cgit v1.2.3-59-g8ed1b
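
Note: the diff above is limited to 'mm', so the include/linux/kasan.h side of
this change is not shown. The snippet below is only a rough sketch of what the
now-inline helper plausibly looks like there, reconstructed from the commit
message and from the arch_kasan_reset_tag() fallback that this patch removes
from mm/kasan/kasan.h; treat the exact guards and placement as assumptions,
not the verbatim upstream hunk.

    /*
     * Sketch (assumed, not the verbatim upstream code): the per-arch
     * override keeps its ((void *)(addr)) fallback, and kasan_reset_tag()
     * becomes a static inline visible to every caller of the header.
     */
    #ifndef arch_kasan_reset_tag
    #define arch_kasan_reset_tag(addr)	((void *)(addr))
    #endif

    static inline void *kasan_reset_tag(const void *addr)
    {
    	return (void *)arch_kasan_reset_tag(addr);
    }

With the definition inlined into the header, each allocator call site reduces
to the arch-specific tag-clearing expression (or a plain cast where pointers
carry no tags) instead of a call into mm/kasan/, which is what removes the
slowdown described in the commit message.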