Diffstat:
 mm/kasan/common.c | 177
 1 file changed, 16 insertions(+), 161 deletions(-)
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 69f583855c8b..833bf2cfd2a3 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -30,13 +30,20 @@
#include "kasan.h"
#include "../slab.h"
+struct slab *kasan_addr_to_slab(const void *addr)
+{
+ if (virt_addr_valid(addr))
+ return virt_to_slab(addr);
+ return NULL;
+}
+
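The new helper gives callers a checked way to map an arbitrary address to its slab, returning NULL for anything outside the linear map. A minimal sketch of a consumer, with the function name and printout invented for illustration (not part of this patch):

/* Hypothetical caller: describe the cache backing an address, if any. */
static void describe_slab_address(const void *addr)
{
	struct slab *slab = kasan_addr_to_slab(addr);

	if (!slab)
		return;	/* not a slab address: stack, vmalloc, etc. */

	pr_err("object belongs to cache %s, object size %u\n",
	       slab->slab_cache->name, slab->slab_cache->object_size);
}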
depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc)
{
unsigned long entries[KASAN_STACK_DEPTH];
unsigned int nr_entries;
nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
- return __stack_depot_save(entries, nr_entries, flags, can_alloc);
+ return __stack_depot_save(entries, nr_entries, 0, flags, can_alloc);
}
void kasan_set_track(struct kasan_track *track, gfp_t flags)
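In kasan_save_stack() above, the new third argument to __stack_depot_save() fills a parameter that lets callers store a few extra user bits alongside the depot handle; KASAN has none to store, hence the 0. The assumed prototype on the stack depot side:

/* Assumed signature after the stack depot extension (not in this file);
 * extra_bits carries caller-defined bits, KASAN passes 0. */
depot_stack_handle_t __stack_depot_save(unsigned long *entries,
					unsigned int nr_entries,
					unsigned int extra_bits,
					gfp_t alloc_flags, bool can_alloc);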
@@ -88,17 +95,6 @@ asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
}
#endif /* CONFIG_KASAN_STACK */
-/*
- * Only allow cache merging when stack collection is disabled and no metadata
- * is present.
- */
-slab_flags_t __kasan_never_merge(void)
-{
- if (kasan_stack_collection_enabled())
- return SLAB_KASAN;
- return 0;
-}
-
void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
u8 tag;
@@ -121,132 +117,11 @@ void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
KASAN_PAGE_FREE, init);
}
-/*
- * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
- * For larger allocations larger redzones are used.
- */
-static inline unsigned int optimal_redzone(unsigned int object_size)
-{
- return
- object_size <= 64 - 16 ? 16 :
- object_size <= 128 - 32 ? 32 :
- object_size <= 512 - 64 ? 64 :
- object_size <= 4096 - 128 ? 128 :
- object_size <= (1 << 14) - 256 ? 256 :
- object_size <= (1 << 15) - 512 ? 512 :
- object_size <= (1 << 16) - 1024 ? 1024 : 2048;
-}
-
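The removed redzone policy is self-contained arithmetic, so it is easy to probe outside the kernel. A userspace sketch that reproduces the ternary chain above and prints the redzone chosen for a few illustrative sizes:

#include <stdio.h>

/* Copy of the removed policy, for experimenting with object sizes. */
static unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64 - 16 ? 16 :
		object_size <= 128 - 32 ? 32 :
		object_size <= 512 - 64 ? 64 :
		object_size <= 4096 - 128 ? 128 :
		object_size <= (1 << 14) - 256 ? 256 :
		object_size <= (1 << 15) - 512 ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

int main(void)
{
	unsigned int sizes[] = { 16, 48, 96, 448, 3968, 70000 };
	unsigned int i;

	/* Prints: 16->16, 48->16, 96->32, 448->64, 3968->128, 70000->2048 */
	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%6u -> %u\n", sizes[i], optimal_redzone(sizes[i]));
	return 0;
}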
-void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
- slab_flags_t *flags)
-{
- unsigned int ok_size;
- unsigned int optimal_size;
-
- /*
- * SLAB_KASAN is used to mark caches as ones that are sanitized by
- * KASAN. Currently this flag is used in two places:
- * 1. In slab_ksize() when calculating the size of the accessible
- * memory within the object.
- * 2. In slab_common.c to prevent merging of sanitized caches.
- */
- *flags |= SLAB_KASAN;
-
- if (!kasan_stack_collection_enabled())
- return;
-
- ok_size = *size;
-
- /* Add alloc meta into redzone. */
- cache->kasan_info.alloc_meta_offset = *size;
- *size += sizeof(struct kasan_alloc_meta);
-
- /*
- * If alloc meta doesn't fit, don't add it.
- * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
- * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
- * larger sizes.
- */
- if (*size > KMALLOC_MAX_SIZE) {
- cache->kasan_info.alloc_meta_offset = 0;
- *size = ok_size;
- /* Continue, since free meta might still fit. */
- }
-
- /* Only the generic mode uses free meta or flexible redzones. */
- if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
- cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
- return;
- }
-
- /*
- * Add free meta into redzone when it's not possible to store
- * it in the object. This is the case when:
- * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
- * be touched after it was freed, or
- * 2. Object has a constructor, which means it's expected to
- * retain its content until the next allocation, or
- * 3. Object is too small.
- * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
- */
- if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
- cache->object_size < sizeof(struct kasan_free_meta)) {
- ok_size = *size;
-
- cache->kasan_info.free_meta_offset = *size;
- *size += sizeof(struct kasan_free_meta);
-
- /* If free meta doesn't fit, don't add it. */
- if (*size > KMALLOC_MAX_SIZE) {
- cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
- *size = ok_size;
- }
- }
-
- /* Calculate size with optimal redzone. */
- optimal_size = cache->object_size + optimal_redzone(cache->object_size);
- /* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */
- if (optimal_size > KMALLOC_MAX_SIZE)
- optimal_size = KMALLOC_MAX_SIZE;
- /* Use optimal size if the size with added metas is not large enough. */
- if (*size < optimal_size)
- *size = optimal_size;
-}
-
void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
cache->kasan_info.is_kmalloc = true;
}
-size_t __kasan_metadata_size(struct kmem_cache *cache)
-{
- if (!kasan_stack_collection_enabled())
- return 0;
- return (cache->kasan_info.alloc_meta_offset ?
- sizeof(struct kasan_alloc_meta) : 0) +
- (cache->kasan_info.free_meta_offset ?
- sizeof(struct kasan_free_meta) : 0);
-}
-
-struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
- const void *object)
-{
- if (!cache->kasan_info.alloc_meta_offset)
- return NULL;
- return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset;
-}
-
-#ifdef CONFIG_KASAN_GENERIC
-struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
- const void *object)
-{
- BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
- if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
- return NULL;
- return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
-}
-#endif
-
void __kasan_poison_slab(struct slab *slab)
{
struct page *page = slab_page(slab);
@@ -312,13 +187,9 @@ static inline u8 assign_tag(struct kmem_cache *cache,
void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
const void *object)
{
- struct kasan_alloc_meta *alloc_meta;
-
- if (kasan_stack_collection_enabled()) {
- alloc_meta = kasan_get_alloc_meta(cache, object);
- if (alloc_meta)
- __memset(alloc_meta, 0, sizeof(*alloc_meta));
- }
+ /* Initialize per-object metadata if it is present. */
+ if (kasan_requires_meta())
+ kasan_init_object_meta(cache, object);
/* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
object = set_tag(object, assign_tag(cache, object, true));
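The open-coded memset of the alloc metadata moves behind kasan_init_object_meta(), gated by the new kasan_requires_meta() predicate. A sketch of what the mode-specific side presumably looks like, reconstructed from the code removed here (file placement assumed):

/* Sketch, assumed to live in the generic-mode code: the same zeroing as
 * the removed inline block, now behind a single entry point. */
void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_meta;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta)
		__memset(alloc_meta, 0, sizeof(*alloc_meta));
}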
@@ -329,13 +200,11 @@ void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
unsigned long ip, bool quarantine, bool init)
{
- u8 tag;
void *tagged_object;
if (!kasan_arch_is_ready())
return false;
- tag = get_tag(object);
tagged_object = object;
object = kasan_reset_tag(object);
@@ -364,7 +233,7 @@ static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
return false;
if (kasan_stack_collection_enabled())
- kasan_set_free_info(cache, object, tag);
+ kasan_save_free_info(cache, tagged_object);
return kasan_quarantine_put(cache, object);
}
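kasan_set_free_info() is replaced by kasan_save_free_info(), which now takes the tagged pointer so tag-based modes can recover the tag themselves instead of receiving it as a separate argument. A hedged sketch of a generic-mode implementation; the helper body and GFP_NOWAIT choice are assumptions, not taken from this patch:

/* Sketch (assumed generic-mode side): record a free stack trace in the
 * object's free metadata, if the cache reserves space for one. */
void kasan_save_free_info(struct kmem_cache *cache, void *object)
{
	struct kasan_free_meta *free_meta;

	free_meta = kasan_get_free_meta(cache, object);
	if (!free_meta)
		return;

	kasan_set_track(&free_meta->free_track, GFP_NOWAIT);
}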
@@ -423,20 +292,6 @@ void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
}
}
-static void set_alloc_info(struct kmem_cache *cache, void *object,
- gfp_t flags, bool is_kmalloc)
-{
- struct kasan_alloc_meta *alloc_meta;
-
- /* Don't save alloc info for kmalloc caches in kasan_slab_alloc(). */
- if (cache->kasan_info.is_kmalloc && !is_kmalloc)
- return;
-
- alloc_meta = kasan_get_alloc_meta(cache, object);
- if (alloc_meta)
- kasan_set_track(&alloc_meta->alloc_track, flags);
-}
-
void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
void *object, gfp_t flags, bool init)
{
@@ -466,8 +321,8 @@ void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
kasan_unpoison(tagged_object, cache->object_size, init);
/* Save alloc info (if possible) for non-kmalloc() allocations. */
- if (kasan_stack_collection_enabled())
- set_alloc_info(cache, (void *)object, flags, false);
+ if (kasan_stack_collection_enabled() && !cache->kasan_info.is_kmalloc)
+ kasan_save_alloc_info(cache, tagged_object, flags);
return tagged_object;
}
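set_alloc_info() disappears: its is_kmalloc gating moves out to the two call sites, and what remains becomes kasan_save_alloc_info(). A sketch of the resulting helper, assuming it keeps the one-track layout of the removed code:

/* Sketch: the body of the removed set_alloc_info(), minus the is_kmalloc
 * check that the callers now perform themselves. */
void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
{
	struct kasan_alloc_meta *alloc_meta;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta)
		kasan_set_track(&alloc_meta->alloc_track, flags);
}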
@@ -512,8 +367,8 @@ static inline void *____kasan_kmalloc(struct kmem_cache *cache,
* Save alloc info (if possible) for kmalloc() allocations.
* This also rewrites the alloc info when called from kasan_krealloc().
*/
- if (kasan_stack_collection_enabled())
- set_alloc_info(cache, (void *)object, flags, true);
+ if (kasan_stack_collection_enabled() && cache->kasan_info.is_kmalloc)
+ kasan_save_alloc_info(cache, (void *)object, flags);
/* Keep the tag that was set by kasan_slab_alloc(). */
return (void *)object;
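The comment above notes that ____kasan_kmalloc() also rewrites the alloc info when reached via kasan_krealloc(). A hypothetical caller-side illustration, assuming the shrink below stays in place and therefore goes through kasan_krealloc():

#include <linux/slab.h>

/* Hypothetical demo: the in-place krealloc() re-enters ____kasan_kmalloc()
 * and replaces the alloc stack recorded for the object. */
static void krealloc_alloc_info_demo(void)
{
	void *buf = kmalloc(64, GFP_KERNEL);
	void *tmp;

	if (!buf)
		return;

	tmp = krealloc(buf, 32, GFP_KERNEL);	/* alloc info now points here */
	if (tmp)
		buf = tmp;
	kfree(buf);
}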