Commit 34303244 authored by Andrey Konovalov, committed by Linus Torvalds

kasan, mm: check kasan_enabled in annotations

Declare the kasan_enabled static key in include/linux/kasan.h and in
include/linux/mm.h, and check it in all KASAN annotations. Each annotation
becomes an __always_inline wrapper that calls the out-of-line __kasan_*()
implementation only when the key is enabled, so the annotations cost no
function call while KASAN is disabled.
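
The shape of each converted annotation is roughly the following
(kasan_example() is a made-up stand-in for any of the real annotations,
not a function this patch adds):

	#include <linux/static_key.h>

	#ifdef CONFIG_KASAN_HW_TAGS
	/* Defined in mm/kasan/, switched on by the HW_TAGS boot code. */
	DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);
	static __always_inline bool kasan_enabled(void)
	{
		return static_branch_likely(&kasan_flag_enabled);
	}
	#else
	/* Other KASAN modes are compile-time only: the check folds away. */
	static inline bool kasan_enabled(void)
	{
		return true;
	}
	#endif

	void __kasan_example(const void *addr, size_t size);	/* out of line */

	static __always_inline void kasan_example(const void *addr, size_t size)
	{
		/* Compiles to a patched jump/NOP when the key is off. */
		if (kasan_enabled())
			__kasan_example(addr, size);
	}

Call sites need no change: e.g. the slab hooks keep calling
kasan_slab_alloc() and kasan_slab_free() unconditionally, and the inlined
key test removes the out-of-line call when KASAN is not enabled.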

Link: https://lkml.kernel.org/r/9f90e3c0aa840dbb4833367c2335193299f69023.1606162397.git.andreyknvl@google.com
Link: https://linux-review.googlesource.com/id/I2589451d3c96c97abbcbf714baabe6161c6f153e

Co-developed-by: Vincenzo Frascino <Vincenzo.Frascino@arm.com>
Signed-off-by: Vincenzo Frascino <Vincenzo.Frascino@arm.com>
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Tested-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8028caac
include/linux/kasan.h  +166 −47
@@ -2,6 +2,7 @@
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
@@ -75,54 +76,176 @@ static inline void kasan_disable_current(void) {}

#ifdef CONFIG_KASAN

void kasan_unpoison_range(const void *address, size_t size);
struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};

#ifdef CONFIG_KASAN_HW_TAGS
DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);
static __always_inline bool kasan_enabled(void)
{
	return static_branch_likely(&kasan_flag_enabled);
}
#else
static inline bool kasan_enabled(void)
{
	return true;
}
#endif

void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void kasan_alloc_pages(struct page *page, unsigned int order);
void kasan_free_pages(struct page *page, unsigned int order);
void __kasan_alloc_pages(struct page *page, unsigned int order);
static __always_inline void kasan_alloc_pages(struct page *page,
						unsigned int order)
{
	if (kasan_enabled())
		__kasan_alloc_pages(page, order);
}

void __kasan_free_pages(struct page *page, unsigned int order);
static __always_inline void kasan_free_pages(struct page *page,
						unsigned int order)
{
	if (kasan_enabled())
		__kasan_free_pages(page, order);
}

void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
				slab_flags_t *flags);
static __always_inline void kasan_cache_create(struct kmem_cache *cache,
				unsigned int *size, slab_flags_t *flags)
{
	if (kasan_enabled())
		__kasan_cache_create(cache, size, flags);
}

size_t __kasan_metadata_size(struct kmem_cache *cache);
static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
{
	if (kasan_enabled())
		return __kasan_metadata_size(cache);
	return 0;
}

void kasan_poison_slab(struct page *page);
void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
void kasan_poison_object_data(struct kmem_cache *cache, void *object);
void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
void __kasan_poison_slab(struct page *page);
static __always_inline void kasan_poison_slab(struct page *page)
{
	if (kasan_enabled())
		__kasan_poison_slab(page);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_object_data(cache, object);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_poison_object_data(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags);
void kasan_kfree_large(void *ptr, unsigned long ip);
void kasan_poison_kfree(void *ptr, unsigned long ip);
void * __must_check kasan_kmalloc(struct kmem_cache *s, const void *object,
bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object,
						unsigned long ip)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, ip);
	return false;
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags);
static __always_inline void * __must_check kasan_slab_alloc(
				struct kmem_cache *s, void *object, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
void * __must_check kasan_krealloc(const void *object, size_t new_size,
					gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check kasan_slab_alloc(struct kmem_cache *s, void *object,
					gfp_t flags);
bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						      size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};
void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						 size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

size_t kasan_metadata_size(struct kmem_cache *cache);
void __kasan_poison_kfree(void *ptr, unsigned long ip);
static __always_inline void kasan_poison_kfree(void *ptr, unsigned long ip)
{
	if (kasan_enabled())
		__kasan_poison_kfree(ptr, ip);
}

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, ip);
}

bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);

#else /* CONFIG_KASAN */

static inline bool kasan_enabled(void)
{
	return false;
}
static inline void kasan_unpoison_range(const void *address, size_t size) {}

static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
static inline void kasan_free_pages(struct page *page, unsigned int order) {}

static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}

static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					void *object) {}
@@ -133,36 +256,32 @@ static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
{
	return (void *)object;
}

static inline void *kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags)
static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
				   unsigned long ip)
{
	return ptr;
	return false;
}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				   gfp_t flags)
{
	return object;
}
static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				 gfp_t flags)
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)object;
	return (void *)ptr;
}

static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
static inline void *kasan_krealloc(const void *object, size_t new_size,
				 gfp_t flags)
{
	return object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
				   unsigned long ip)
{
	return false;
	return (void *)object;
}

static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {}
static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}

#endif /* CONFIG_KASAN */

include/linux/mm.h  +16 −6
@@ -31,6 +31,7 @@
#include <linux/sizes.h>
#include <linux/sched.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>

struct mempolicy;
struct anon_vma;
@@ -1422,22 +1423,30 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline u8 page_kasan_tag(const struct page *page)
{
	if (kasan_enabled())
		return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
	return 0xff;
}

static inline void page_kasan_tag_set(struct page *page, u8 tag)
{
	if (kasan_enabled()) {
		page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
		page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
	}
}

static inline void page_kasan_tag_reset(struct page *page)
{
	if (kasan_enabled())
		page_kasan_tag_set(page, 0xff);
}
#else

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline u8 page_kasan_tag(const struct page *page)
{
	return 0xff;
@@ -1445,7 +1454,8 @@ static inline u8 page_kasan_tag(const struct page *page)

static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
static inline void page_kasan_tag_reset(struct page *page) { }
#endif

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline struct zone *page_zone(const struct page *page)
{
mm/kasan/common.c  +28 −28
@@ -58,7 +58,7 @@ void kasan_disable_current(void)
}
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void kasan_unpoison_range(const void *address, size_t size)
void __kasan_unpoison_range(const void *address, size_t size)
{
	unpoison_range(address, size);
}
@@ -86,7 +86,7 @@ asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
}
#endif /* CONFIG_KASAN_STACK */

void kasan_alloc_pages(struct page *page, unsigned int order)
void __kasan_alloc_pages(struct page *page, unsigned int order)
{
	u8 tag;
	unsigned long i;
@@ -100,7 +100,7 @@ void kasan_alloc_pages(struct page *page, unsigned int order)
	unpoison_range(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
void __kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		poison_range(page_address(page),
@@ -127,7 +127,7 @@ static inline unsigned int optimal_redzone(unsigned int object_size)
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			  slab_flags_t *flags)
{
	unsigned int orig_size = *size;
@@ -173,7 +173,7 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
	*flags |= SLAB_KASAN;
}

size_t kasan_metadata_size(struct kmem_cache *cache)
size_t __kasan_metadata_size(struct kmem_cache *cache)
{
	if (!kasan_stack_collection_enabled())
		return 0;
@@ -196,7 +196,7 @@ struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
	return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
}

void kasan_poison_slab(struct page *page)
void __kasan_poison_slab(struct page *page)
{
	unsigned long i;

@@ -206,12 +206,12 @@ void kasan_poison_slab(struct page *page)
		     KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	unpoison_range(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	poison_range(object,
			round_up(cache->object_size, KASAN_GRANULE_SIZE),
@@ -264,7 +264,7 @@ static u8 assign_tag(struct kmem_cache *cache, const void *object,
#endif
}

void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
						const void *object)
{
	struct kasan_alloc_meta *alloc_meta;
@@ -283,7 +283,7 @@ void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
	return (void *)object;
}

static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
static bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
			      unsigned long ip, bool quarantine)
{
	u8 tag;
@@ -326,9 +326,9 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
	return IS_ENABLED(CONFIG_KASAN_GENERIC);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
	return __kasan_slab_free(cache, object, ip, true);
	return ____kasan_slab_free(cache, object, ip, true);
}

static void set_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
@@ -336,7 +336,7 @@ static void set_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
	kasan_set_track(&kasan_get_alloc_meta(cache, object)->alloc_track, flags);
}

static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags, bool keep_tag)
{
	unsigned long redzone_start;
@@ -368,20 +368,20 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
	return set_tag(object, tag);
}

void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
					gfp_t flags)
void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
					void *object, gfp_t flags)
{
	return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
	return ____kasan_kmalloc(cache, object, cache->object_size, flags, false);
}

void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
					size_t size, gfp_t flags)
{
	return __kasan_kmalloc(cache, object, size, flags, true);
	return ____kasan_kmalloc(cache, object, size, flags, true);
}
EXPORT_SYMBOL(kasan_kmalloc);
EXPORT_SYMBOL(__kasan_kmalloc);

void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	struct page *page;
@@ -406,7 +406,7 @@ void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
	return (void *)ptr;
}

void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

@@ -416,13 +416,13 @@ void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		return kasan_kmalloc_large(object, size, flags);
		return __kasan_kmalloc_large(object, size, flags);
	else
		return __kasan_kmalloc(page->slab_cache, object, size,
		return ____kasan_kmalloc(page->slab_cache, object, size,
						flags, true);
}

void kasan_poison_kfree(void *ptr, unsigned long ip)
void __kasan_poison_kfree(void *ptr, unsigned long ip)
{
	struct page *page;

@@ -435,11 +435,11 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
		}
		poison_range(ptr, page_size(page), KASAN_FREE_PAGE);
	} else {
		__kasan_slab_free(page->slab_cache, ptr, ip, false);
		____kasan_slab_free(page->slab_cache, ptr, ip, false);
	}
}

void kasan_kfree_large(void *ptr, unsigned long ip)
void __kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr)))
		kasan_report_invalid_free(ptr, ip);