Index: mm/kasan/kasan.c |
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c |
index c81549d5c8330f59bec68165127dff1d3aab85bd..fd6b7d486ba0ea0e86231ac0293476a096912043 100644 |
--- a/mm/kasan/kasan.c |
+++ b/mm/kasan/kasan.c |
@@ -36,9 +36,19 @@ |
#include <linux/types.h> |
#include <linux/vmalloc.h> |
#include <linux/bug.h> |
+#include <linux/page-isolation.h> |
+#include <asm/cacheflush.h> |
+#include <asm/tlbflush.h> |
+#include <asm/sections.h> |
#include "kasan.h" |
#include "../slab.h" |
+#include "../internal.h" |
+ |
+static DEFINE_SPINLOCK(shadow_lock); |
+static LIST_HEAD(unmap_list); |
+static void kasan_unmap_shadow_workfn(struct work_struct *work); |
+static DECLARE_WORK(kasan_unmap_shadow_work, kasan_unmap_shadow_workfn); |
void kasan_enable_current(void) |
{ |
@@ -116,6 +126,246 @@ void kasan_unpoison_stack_above_sp_to(const void *watermark) |
kasan_unpoison_shadow(sp, size); |
} |
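+ |
+/* |
+ * Fill the per-page shadow bytes covering [address, address + size) |
+ * with @value. Does nothing until the per-page shadow is initialized. |
+ */ |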
+static void kasan_mark_pshadow(const void *address, size_t size, u8 value) |
+{ |
+ void *pshadow_start; |
+ void *pshadow_end; |
+ |
+ if (!kasan_pshadow_inited()) |
+ return; |
+ |
+ pshadow_start = kasan_mem_to_pshadow(address); |
+ pshadow_end = kasan_mem_to_pshadow(address + size); |
+ |
+ memset(pshadow_start, value, pshadow_end - pshadow_start); |
+} |
+ |
+void kasan_poison_pshadow(const void *address, size_t size) |
+{ |
+ kasan_mark_pshadow(address, size, KASAN_PER_PAGE_BYPASS); |
+} |
+ |
+void kasan_unpoison_pshadow(const void *address, size_t size) |
+{ |
+ kasan_mark_pshadow(address, size, 0); |
+} |
+ |
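+/* |
+ * A shadow PTE is considered "black" if it is not present or if it |
+ * maps the shared, fully-poisoned kasan_black_page. |
+ */ |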
+static bool kasan_black_shadow(pte_t *ptep) |
+{ |
+ pte_t pte = *ptep; |
+ |
+ if (pte_none(pte)) |
+ return true; |
+ |
+ if (pte_pfn(pte) == kasan_black_page_pfn) |
+ return true; |
+ |
+ return false; |
+} |
+ |
+static int kasan_exist_shadow_pte(pte_t *ptep, pgtable_t token, |
+ unsigned long addr, void *data) |
+{ |
+ unsigned long *count = data; |
+ |
+ if (kasan_black_shadow(ptep)) |
+ return 0; |
+ |
+ (*count)++; |
+ return 0; |
+} |
+ |
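+/* |
+ * apply_to_page_range() callback: replace a black shadow entry with a |
+ * newly allocated page pre-filled from kasan_black_page. Races are |
+ * resolved under shadow_lock; the losing allocation is freed. |
+ */ |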
+static int kasan_map_shadow_pte(pte_t *ptep, pgtable_t token, |
+ unsigned long addr, void *data) |
+{ |
+ pte_t pte; |
+ gfp_t gfp_flags = *(gfp_t *)data; |
+ struct page *page; |
+ unsigned long flags; |
+ |
+ if (!kasan_black_shadow(ptep)) |
+ return 0; |
+ |
+ page = alloc_page(gfp_flags); |
+ if (!page) |
+ return -ENOMEM; |
+ |
+ __memcpy(page_address(page), kasan_black_page, PAGE_SIZE); |
+ |
+ spin_lock_irqsave(&shadow_lock, flags); |
+ if (!kasan_black_shadow(ptep)) |
+ goto out; |
+ |
+ pte = mk_pte(page, PAGE_KERNEL); |
+ set_pte_at(&init_mm, addr, ptep, pte); |
+ page = NULL; |
+ |
+out: |
+ spin_unlock_irqrestore(&shadow_lock, flags); |
+ if (page) |
+ __free_page(page); |
+ |
+ return 0; |
+} |
+ |
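+/* |
+ * Back the byte-granular shadow of [addr, addr + size) with writable |
+ * pages. Mapping is skipped when every shadow PTE in the range is |
+ * already populated with something other than the black page. |
+ */ |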
+static int kasan_map_shadow(const void *addr, size_t size, gfp_t flags) |
+{ |
+ int err; |
+ unsigned long shadow_start, shadow_end; |
+ unsigned long count = 0; |
+ |
+ if (!kasan_pshadow_inited()) |
+ return 0; |
+ |
+ flags = flags & GFP_RECLAIM_MASK; |
+ shadow_start = (unsigned long)kasan_mem_to_shadow(addr); |
+ shadow_end = (unsigned long)kasan_mem_to_shadow(addr + size); |
+ shadow_start = round_down(shadow_start, PAGE_SIZE); |
+ shadow_end = ALIGN(shadow_end, PAGE_SIZE); |
+ |
+ err = apply_to_page_range(&init_mm, shadow_start, |
+ shadow_end - shadow_start, |
+ kasan_exist_shadow_pte, &count); |
+ if (err) { |
+ pr_err("checking shadow entry is failed"); |
+ return err; |
+ } |
+ |
+ if (count == (shadow_end - shadow_start) / PAGE_SIZE) |
+ goto out; |
+ |
+ err = apply_to_page_range(&init_mm, shadow_start, |
+ shadow_end - shadow_start, |
+ kasan_map_shadow_pte, (void *)&flags); |
+ |
+out: |
+ arch_kasan_map_shadow(shadow_start, shadow_end); |
+ flush_cache_vmap(shadow_start, shadow_end); |
+ if (err) |
+ pr_err("mapping shadow entry is failed"); |
+ |
+ return err; |
+} |
+ |
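+/* |
+ * apply_to_page_range() callback: move the mapped shadow page onto the |
+ * list passed via @data and remap the entry to the read-only black |
+ * page. Entries whose address lies inside [_text, _end) are skipped. |
+ */ |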
+static int kasan_unmap_shadow_pte(pte_t *ptep, pgtable_t token, |
+ unsigned long addr, void *data) |
+{ |
+ pte_t pte; |
+ struct page *page; |
+ struct list_head *list = data; |
+ |
+ if (kasan_black_shadow(ptep)) |
+ return 0; |
+ |
+ if (addr >= (unsigned long)_text && addr < (unsigned long)_end) |
+ return 0; |
+ |
+ pte = *ptep; |
+ page = pfn_to_page(pte_pfn(pte)); |
+ list_add(&page->lru, list); |
+ |
+ pte = pfn_pte(PFN_DOWN(__pa(kasan_black_page)), PAGE_KERNEL); |
+ pte = pte_wrprotect(pte); |
+ set_pte_at(&init_mm, addr, ptep, pte); |
+ |
+ return 0; |
+} |
+ |
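+/* |
+ * Deferred shadow reclaim: drain unmap_list, point the shadow PTEs of |
+ * each queued buddy page back at the black page, flush the TLB over |
+ * the touched shadow range, then free both the buddy pages and the |
+ * shadow pages collected from the page tables. |
+ */ |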
+static void kasan_unmap_shadow_workfn(struct work_struct *work) |
+{ |
+ struct page *page, *next; |
+ LIST_HEAD(list); |
+ LIST_HEAD(shadow_list); |
+ unsigned long flags; |
+ unsigned int order; |
+ unsigned long shadow_addr, shadow_size; |
+ unsigned long tlb_start = ULONG_MAX, tlb_end = 0; |
+ int err; |
+ |
+ spin_lock_irqsave(&shadow_lock, flags); |
+ list_splice_init(&unmap_list, &list); |
+ spin_unlock_irqrestore(&shadow_lock, flags); |
+ |
+ if (list_empty(&list)) |
+ return; |
+ |
+ list_for_each_entry_safe(page, next, &list, lru) { |
+ order = page_private(page); |
+ post_alloc_hook(page, order, GFP_NOWAIT); |
+ set_page_private(page, order); |
+ |
+ shadow_addr = (unsigned long)kasan_mem_to_shadow( |
+ page_address(page)); |
+ shadow_size = PAGE_SIZE << (order - KASAN_SHADOW_SCALE_SHIFT); |
+ |
+ tlb_start = min(shadow_addr, tlb_start); |
+ tlb_end = max(shadow_addr + shadow_size, tlb_end); |
+ |
+ flush_cache_vunmap(shadow_addr, shadow_addr + shadow_size); |
+ err = apply_to_page_range(&init_mm, shadow_addr, shadow_size, |
+ kasan_unmap_shadow_pte, &shadow_list); |
+ if (err) { |
+ pr_err("invalid shadow entry is found"); |
+ list_del(&page->lru); |
+ } |
+ } |
+ flush_tlb_kernel_range(tlb_start, tlb_end); |
+ |
+ list_for_each_entry_safe(page, next, &list, lru) { |
+ list_del(&page->lru); |
+ __free_pages(page, page_private(page)); |
+ } |
+ list_for_each_entry_safe(page, next, &shadow_list, lru) { |
+ list_del(&page->lru); |
+ __free_page(page); |
+ } |
+} |
+ |
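+/* |
+ * Called when a high-order page is freed. If its shadow range still |
+ * has real (non-black) pages mapped, queue the page on unmap_list and |
+ * let the workqueue reclaim the shadow. Returns true when the page |
+ * was queued and will be freed later by the workqueue. |
+ */ |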
+static bool kasan_unmap_shadow(struct page *page, unsigned int order, |
+ unsigned int max_order) |
+{ |
+ int err; |
+ unsigned long shadow_addr, shadow_size; |
+ unsigned long count = 0; |
+ LIST_HEAD(list); |
+ unsigned long flags; |
+ struct zone *zone; |
+ int mt; |
+ |
+ if (order < KASAN_SHADOW_SCALE_SHIFT) |
+ return false; |
+ |
+ if (max_order != (KASAN_SHADOW_SCALE_SHIFT + 1)) |
+ return false; |
+ |
+ shadow_addr = (unsigned long)kasan_mem_to_shadow(page_address(page)); |
+ shadow_size = PAGE_SIZE << (order - KASAN_SHADOW_SCALE_SHIFT); |
+ err = apply_to_page_range(&init_mm, shadow_addr, shadow_size, |
+ kasan_exist_shadow_pte, &count); |
+ if (err) { |
+ pr_err("checking shadow entry is failed"); |
+ return false; |
+ } |
+ |
+ if (!count) |
+ return false; |
+ |
+ zone = page_zone(page); |
+ mt = get_pageblock_migratetype(page); |
+ if (!is_migrate_isolate(mt)) |
+ __mod_zone_freepage_state(zone, -(1UL << order), mt); |
+ |
+ set_page_private(page, order); |
+ |
+ spin_lock_irqsave(&shadow_lock, flags); |
+ list_add(&page->lru, &unmap_list); |
+ spin_unlock_irqrestore(&shadow_lock, flags); |
+ |
+ schedule_work(&kasan_unmap_shadow_work); |
+ |
+ return true; |
+} |
+ |
/* |
* All functions below always inlined so compiler could |
* perform better optimizations in each of __asan_loadX/__assn_storeX |
@@ -136,95 +386,79 @@ static __always_inline bool memory_is_poisoned_1(unsigned long addr) |
static __always_inline bool memory_is_poisoned_2(unsigned long addr) |
{ |
- u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr); |
- |
- if (unlikely(*shadow_addr)) { |
- if (memory_is_poisoned_1(addr + 1)) |
- return true; |
- |
- /* |
- * If single shadow byte covers 2-byte access, we don't |
- * need to do anything more. Otherwise, test the first |
- * shadow byte. |
- */ |
- if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0)) |
- return false; |
+ if (unlikely(memory_is_poisoned_1(addr))) |
+ return true; |
- return unlikely(*(u8 *)shadow_addr); |
- } |
+ /* |
+ * If single shadow byte covers 2-byte access, we don't |
+ * need to do anything more. Otherwise, test the last |
+ * shadow byte. |
+ */ |
+ if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0)) |
+ return false; |
- return false; |
+ return memory_is_poisoned_1(addr + 1); |
} |
static __always_inline bool memory_is_poisoned_4(unsigned long addr) |
{ |
- u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr); |
- |
- if (unlikely(*shadow_addr)) { |
- if (memory_is_poisoned_1(addr + 3)) |
- return true; |
- |
- /* |
- * If single shadow byte covers 4-byte access, we don't |
- * need to do anything more. Otherwise, test the first |
- * shadow byte. |
- */ |
- if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3)) |
- return false; |
+ if (unlikely(memory_is_poisoned_1(addr + 3))) |
+ return true; |
- return unlikely(*(u8 *)shadow_addr); |
- } |
+ /* |
+ * If single shadow byte covers 4-byte access, we don't |
+ * need to do anything more. Otherwise, test the first |
+ * shadow byte. |
+ */ |
+ if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3)) |
+ return false; |
- return false; |
+ return memory_is_poisoned_1(addr); |
} |
static __always_inline bool memory_is_poisoned_8(unsigned long addr) |
{ |
- u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr); |
+ u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr); |
- if (unlikely(*shadow_addr)) { |
- if (memory_is_poisoned_1(addr + 7)) |
- return true; |
+ if (unlikely(*shadow_addr)) |
+ return true; |
- /* |
- * If single shadow byte covers 8-byte access, we don't |
- * need to do anything more. Otherwise, test the first |
- * shadow byte. |
- */ |
- if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE))) |
- return false; |
+ /* |
+ * If single shadow byte covers 8-byte access, we don't |
+ * need to do anything more. Otherwise, test the last |
+ * shadow byte. |
+ */ |
+ if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE))) |
+ return false; |
- return unlikely(*(u8 *)shadow_addr); |
- } |
+ if (unlikely(memory_is_poisoned_1(addr + 7))) |
+ return true; |
return false; |
} |
static __always_inline bool memory_is_poisoned_16(unsigned long addr) |
{ |
- u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr); |
- |
- if (unlikely(*shadow_addr)) { |
- u16 shadow_first_bytes = *(u16 *)shadow_addr; |
+ u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr); |
- if (unlikely(shadow_first_bytes)) |
- return true; |
+ if (unlikely(*shadow_addr)) |
+ return true; |
- /* |
- * If two shadow bytes covers 16-byte access, we don't |
- * need to do anything more. Otherwise, test the last |
- * shadow byte. |
- */ |
- if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE))) |
- return false; |
+ /* |
+ * If two shadow bytes cover 16-byte access, we don't |
+ * need to do anything more. Otherwise, test the last |
+ * shadow byte. |
+ */ |
+ if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE))) |
+ return false; |
- return memory_is_poisoned_1(addr + 15); |
- } |
+ if (unlikely(memory_is_poisoned_1(addr + 15))) |
+ return true; |
return false; |
} |
-static __always_inline unsigned long bytes_is_zero(const u8 *start, |
+static __always_inline unsigned long bytes_is_nonzero(const u8 *start, |
size_t size) |
{ |
while (size) { |
@@ -237,7 +471,7 @@ static __always_inline unsigned long bytes_is_zero(const u8 *start, |
return 0; |
} |
-static __always_inline unsigned long memory_is_zero(const void *start, |
+static __always_inline unsigned long memory_is_nonzero(const void *start, |
const void *end) |
{ |
unsigned int words; |
@@ -245,11 +479,11 @@ static __always_inline unsigned long memory_is_zero(const void *start, |
unsigned int prefix = (unsigned long)start % 8; |
if (end - start <= 16) |
- return bytes_is_zero(start, end - start); |
+ return bytes_is_nonzero(start, end - start); |
if (prefix) { |
prefix = 8 - prefix; |
- ret = bytes_is_zero(start, prefix); |
+ ret = bytes_is_nonzero(start, prefix); |
if (unlikely(ret)) |
return ret; |
start += prefix; |
@@ -258,12 +492,12 @@ static __always_inline unsigned long memory_is_zero(const void *start, |
words = (end - start) / 8; |
while (words) { |
if (unlikely(*(u64 *)start)) |
- return bytes_is_zero(start, 8); |
+ return bytes_is_nonzero(start, 8); |
start += 8; |
words--; |
} |
- return bytes_is_zero(start, (end - start) % 8); |
+ return bytes_is_nonzero(start, (end - start) % 8); |
} |
static __always_inline bool memory_is_poisoned_n(unsigned long addr, |
@@ -271,7 +505,7 @@ static __always_inline bool memory_is_poisoned_n(unsigned long addr, |
{ |
unsigned long ret; |
- ret = memory_is_zero(kasan_mem_to_shadow((void *)addr), |
+ ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr), |
kasan_mem_to_shadow((void *)addr + size - 1) + 1); |
if (unlikely(ret)) { |
@@ -285,6 +519,72 @@ static __always_inline bool memory_is_poisoned_n(unsigned long addr, |
return false; |
} |
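+ |
+/* |
+ * Read the per-page shadow value for an access. A single byte is |
+ * enough unless the access crosses a page boundary; in that case, and |
+ * in the generic pshadow_val_n() path, differing bytes across the |
+ * range are reported as KASAN_PER_PAGE_FREE. |
+ */ |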
+static __always_inline u8 pshadow_val_builtin(unsigned long addr, size_t size) |
+{ |
+ u8 shadow_val = *(u8 *)kasan_mem_to_pshadow((void *)addr); |
+ |
+ if (shadow_val == KASAN_PER_PAGE_FREE) |
+ return shadow_val; |
+ |
+ if (likely(((addr + size - 1) & ~PAGE_MASK) >= (size - 1))) |
+ return shadow_val; |
+ |
+ if (shadow_val != *(u8 *)kasan_mem_to_pshadow((void *)addr + size - 1)) |
+ return KASAN_PER_PAGE_FREE; |
+ |
+ return shadow_val; |
+} |
+ |
+static __always_inline u8 pshadow_val_n(unsigned long addr, size_t size) |
+{ |
+ u8 *start, *end; |
+ u8 shadow_val; |
+ |
+ start = kasan_mem_to_pshadow((void *)addr); |
+ end = kasan_mem_to_pshadow((void *)addr + size - 1); |
+ size = end - start + 1; |
+ |
+ shadow_val = *start; |
+ if (shadow_val == KASAN_PER_PAGE_FREE) |
+ return shadow_val; |
+ |
+ while (size) { |
+ /* |
+ * A different shadow value means that the access crosses |
+ * a boundary. Report the error even if the access starts |
+ * in a valid area. |
+ */ |
+ if (shadow_val != *start) |
+ return KASAN_PER_PAGE_FREE; |
+ |
+ start++; |
+ size--; |
+ } |
+ |
+ return shadow_val; |
+} |
+ |
+static __always_inline u8 pshadow_val(unsigned long addr, size_t size) |
+{ |
+ if (!kasan_pshadow_inited()) |
+ return KASAN_PER_PAGE_BYPASS; |
+ |
+ if (__builtin_constant_p(size)) { |
+ switch (size) { |
+ case 1: |
+ case 2: |
+ case 4: |
+ case 8: |
+ case 16: |
+ return pshadow_val_builtin(addr, size); |
+ default: |
+ BUILD_BUG(); |
+ } |
+ } |
+ |
+ return pshadow_val_n(addr, size); |
+} |
+ |
static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size) |
{ |
if (__builtin_constant_p(size)) { |
@@ -307,6 +607,24 @@ static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size) |
return memory_is_poisoned_n(addr, size); |
} |
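+ |
+/* |
+ * Slow path taken when the fast checks could not prove the access |
+ * valid: ask the architecture to prepare for a re-check, re-test the |
+ * byte-granular shadow with preemption disabled, and report only if |
+ * the region is still poisoned (or if preparation fails). |
+ */ |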
+static noinline void check_memory_region_slow(unsigned long addr, |
+ size_t size, bool write, |
+ unsigned long ret_ip) |
+{ |
+ preempt_disable(); |
+ if (!arch_kasan_recheck_prepare(addr, size)) |
+ goto report; |
+ |
+ if (!memory_is_poisoned(addr, size)) { |
+ preempt_enable(); |
+ return; |
+ } |
+ |
+report: |
+ preempt_enable(); |
+ __kasan_report(addr, size, write, ret_ip); |
+} |
+ |
static __always_inline void check_memory_region_inline(unsigned long addr, |
size_t size, bool write, |
unsigned long ret_ip) |
@@ -316,14 +634,17 @@ static __always_inline void check_memory_region_inline(unsigned long addr, |
if (unlikely((void *)addr < |
kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) { |
- kasan_report(addr, size, write, ret_ip); |
+ __kasan_report(addr, size, write, ret_ip); |
return; |
} |
if (likely(!memory_is_poisoned(addr, size))) |
return; |
- kasan_report(addr, size, write, ret_ip); |
+ if (!pshadow_val(addr, size)) |
+ return; |
+ |
+ check_memory_region_slow(addr, size, write, ret_ip); |
} |
static void check_memory_region(unsigned long addr, |
@@ -371,18 +692,51 @@ void *memcpy(void *dest, const void *src, size_t len) |
return __memcpy(dest, src, len); |
} |
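+ |
+/* |
+ * kasan_report() filters reports through the per-page shadow: if it |
+ * shows the whole range as valid the report is dropped, otherwise the |
+ * slow re-check path decides. |
+ */ |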
+void kasan_report(unsigned long addr, size_t size, |
+ bool is_write, unsigned long ip) |
+{ |
+ if (!pshadow_val(addr, size)) |
+ return; |
+ |
+ check_memory_region_slow(addr, size, is_write, ip); |
+} |
+ |
void kasan_alloc_pages(struct page *page, unsigned int order) |
{ |
- if (likely(!PageHighMem(page))) |
- kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order); |
+ if (likely(!PageHighMem(page))) { |
+ if (!kasan_pshadow_inited()) { |
+ kasan_unpoison_shadow(page_address(page), |
+ PAGE_SIZE << order); |
+ return; |
+ } |
+ |
+ kasan_unpoison_pshadow(page_address(page), PAGE_SIZE << order); |
+ } |
} |
void kasan_free_pages(struct page *page, unsigned int order) |
{ |
- if (likely(!PageHighMem(page))) |
- kasan_poison_shadow(page_address(page), |
- PAGE_SIZE << order, |
- KASAN_FREE_PAGE); |
+ if (likely(!PageHighMem(page))) { |
+ if (!kasan_pshadow_inited()) { |
+ kasan_poison_shadow(page_address(page), |
+ PAGE_SIZE << order, |
+ KASAN_FREE_PAGE); |
+ return; |
+ } |
+ |
+ kasan_mark_pshadow(page_address(page), |
+ PAGE_SIZE << order, |
+ KASAN_PER_PAGE_FREE); |
+ } |
+} |
+ |
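+/* |
+ * Hook for the buddy allocator's free path. Returns true when KASAN |
+ * takes the page to reclaim its shadow; the page is handed back to the |
+ * allocator later from the workqueue. |
+ */ |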
+bool kasan_free_buddy(struct page *page, unsigned int order, |
+ unsigned int max_order) |
+{ |
+ if (!kasan_pshadow_inited()) |
+ return false; |
+ |
+ return kasan_unmap_shadow(page, order, max_order); |
} |
/* |
@@ -618,19 +972,25 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size, |
} |
EXPORT_SYMBOL(kasan_kmalloc); |
-void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags) |
+int kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags) |
{ |
struct page *page; |
unsigned long redzone_start; |
unsigned long redzone_end; |
+ int err; |
if (gfpflags_allow_blocking(flags)) |
quarantine_reduce(); |
if (unlikely(ptr == NULL)) |
- return; |
+ return 0; |
page = virt_to_page(ptr); |
+ err = kasan_slab_page_alloc(ptr, |
+ PAGE_SIZE << compound_order(page), flags); |
+ if (err) |
+ return err; |
+ |
redzone_start = round_up((unsigned long)(ptr + size), |
KASAN_SHADOW_SCALE_SIZE); |
redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page)); |
@@ -638,6 +998,8 @@ void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags) |
kasan_unpoison_shadow(ptr, size); |
kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start, |
KASAN_PAGE_REDZONE); |
+ |
+ return 0; |
} |
void kasan_krealloc(const void *object, size_t size, gfp_t flags) |
@@ -676,6 +1038,31 @@ void kasan_kfree_large(const void *ptr) |
KASAN_FREE_PAGE); |
} |
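+ |
+/* |
+ * Map real shadow for a slab-allocated page range, unpoison its |
+ * byte-granular shadow and mark its per-page shadow as |
+ * KASAN_PER_PAGE_BYPASS so checks keep consulting the byte-granular |
+ * shadow for this range. |
+ */ |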
+int kasan_slab_page_alloc(const void *addr, size_t size, gfp_t flags) |
+{ |
+ int err; |
+ |
+ if (!kasan_pshadow_inited() || !addr) |
+ return 0; |
+ |
+ err = kasan_map_shadow(addr, size, flags); |
+ if (err) |
+ return err; |
+ |
+ kasan_unpoison_shadow(addr, size); |
+ kasan_poison_pshadow(addr, size); |
+ |
+ return 0; |
+} |
+ |
+void kasan_slab_page_free(const void *addr, size_t size) |
+{ |
+ if (!kasan_pshadow_inited() || !addr) |
+ return; |
+ |
+ kasan_poison_shadow(addr, size, KASAN_FREE_PAGE); |
+} |
+ |
int kasan_module_alloc(void *addr, size_t size) |
{ |
void *ret; |
@@ -710,6 +1097,31 @@ void kasan_free_shadow(const struct vm_struct *vm) |
vfree(kasan_mem_to_shadow(vm->addr)); |
} |
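+ |
+/* |
+ * Same as kasan_slab_page_alloc()/kasan_slab_page_free(), but for |
+ * kernel stacks; the shadow is mapped with THREADINFO_GFP. |
+ */ |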
+int kasan_stack_alloc(const void *addr, size_t size) |
+{ |
+ int err; |
+ |
+ if (!kasan_pshadow_inited() || !addr) |
+ return 0; |
+ |
+ err = kasan_map_shadow(addr, size, THREADINFO_GFP); |
+ if (err) |
+ return err; |
+ |
+ kasan_unpoison_shadow(addr, size); |
+ kasan_poison_pshadow(addr, size); |
+ |
+ return 0; |
+} |
+ |
+void kasan_stack_free(const void *addr, size_t size) |
+{ |
+ if (!kasan_pshadow_inited() || !addr) |
+ return; |
+ |
+ kasan_poison_shadow(addr, size, KASAN_FREE_PAGE); |
+} |
+ |
static void register_global(struct kasan_global *global) |
{ |
size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE); |