Index: mm/kasan/kasan.h
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 1229298cce646ddc725c4f1ea9d823a0c71e3cd1..7a20707093a191be649a6204c70235de54b79a65 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -13,6 +13,9 @@
 #define KASAN_KMALLOC_FREE 0xFB /* object was freed (kmem_cache_free/kfree) */
 #define KASAN_GLOBAL_REDZONE 0xFA /* redzone for global variable */
+#define KASAN_PER_PAGE_BYPASS 0xFF /* page should be checked by per-byte shadow */
+#define KASAN_PER_PAGE_FREE 0xFE /* page was freed */
+
 /*
  * Stack redzone shadow values
  * (Those are compiler's ABI, don't change them)
@@ -85,17 +88,28 @@ struct kasan_free_meta {
 	struct qlist_node quarantine_link;
 };
+extern unsigned long kasan_black_page_pfn;
+
 struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
 					const void *object);
 struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
 					const void *object);
-static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
-{
-	return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET)
-		<< KASAN_SHADOW_SCALE_SHIFT);
-}
+#ifdef HAVE_KASAN_PER_PAGE_SHADOW
+void arch_kasan_map_shadow(unsigned long s, unsigned long e);
+bool arch_kasan_recheck_prepare(unsigned long addr, size_t size);
+static inline bool kasan_pshadow_inited(void) { return true; }
+
+#else
+static inline void arch_kasan_map_shadow(unsigned long s, unsigned long e) { }
+static inline bool arch_kasan_recheck_prepare(unsigned long addr,
+					size_t size) { return false; }
+static inline bool kasan_pshadow_inited(void) { return false; }
+#endif
+
+void __kasan_report(unsigned long addr, size_t size,
+		bool is_write, unsigned long ip);
 void kasan_report(unsigned long addr, size_t size,
 		bool is_write, unsigned long ip);
 void kasan_report_double_free(struct kmem_cache *cache, void *object,