Index: arch/x86/mm/kasan_init_64.c
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 0c7d8129bed688696f5caffee90a8d9e1fb75c54..a185668808ebd388d7322b87040c1b44f93efb75 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -7,6 +7,7 @@
 #include <linux/sched.h>
 #include <linux/sched/task.h>
 #include <linux/vmalloc.h>
+#include <linux/memblock.h>
 
 #include <asm/e820/types.h>
 #include <asm/tlbflush.h>
@@ -15,20 +16,47 @@
 extern pgd_t early_level4_pgt[PTRS_PER_PGD];
 extern struct range pfn_mapped[E820_MAX_ENTRIES];
 
-static int __init map_range(struct range *range)
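+/*
+ * Boot-time allocator for shadow pages: 'size' bytes aligned to 'size',
+ * taken from memory above MAX_DMA_ADDRESS on the requested node.
+ */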
+static __init void *early_alloc(size_t size, int node)
+{
+	return memblock_virt_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
+					   BOOTMEM_ALLOC_ACCESSIBLE, node);
+}
+
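+/*
+ * Map the shadow of one pfn_mapped[] range. With 'pshadow' set, back
+ * the per-page shadow with real pages via vmemmap_populate(); otherwise
+ * populate the regular byte-granular shadow.
+ */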
+static int __init map_range(struct range *range, bool pshadow)
 {
 	unsigned long start;
 	unsigned long end;
 
-	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
-	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));
+	start = (unsigned long)pfn_to_kaddr(range->start);
+	end = (unsigned long)pfn_to_kaddr(range->end);
 
 	/*
 	 * end + 1 here is intentional. We check several shadow bytes in advance
 	 * to slightly speed up fastpath. In some rare cases we could cross
 	 * boundary of mapped shadow, so we just map some more here.
 	 */
-	return vmemmap_populate(start, end + 1, NUMA_NO_NODE);
+	if (pshadow) {
+		start = (unsigned long)kasan_mem_to_pshadow((void *)start);
+		end = (unsigned long)kasan_mem_to_pshadow((void *)end);
+
+		return vmemmap_populate(start, end + 1, NUMA_NO_NODE);
+	}
+
+	start = (unsigned long)kasan_mem_to_shadow((void *)start);
+	end = (unsigned long)kasan_mem_to_shadow((void *)end);
+
+	kasan_populate_shadow((void *)start, (void *)end + 1,
+			      false, true);
+	return 0;
 }
 
 static void __init clear_pgds(unsigned long start,
@@ -49,11 +77,11 @@ static void __init clear_pgds(unsigned long start,
 	}
 }
 
-static void __init kasan_map_early_shadow(pgd_t *pgd)
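+/* Populate the early shadow page tables for the given address range. */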
+static void __init kasan_map_early_shadow(pgd_t *pgd,
+					  unsigned long start, unsigned long end)
 {
 	int i;
-	unsigned long start = KASAN_SHADOW_START;
-	unsigned long end = KASAN_SHADOW_END;
 
 	for (i = pgd_index(start); start < end; i++) {
 		switch (CONFIG_PGTABLE_LEVELS) {
@@ -109,8 +137,40 @@ void __init kasan_early_init(void)
 	for (i = 0; CONFIG_PGTABLE_LEVELS >= 5 && i < PTRS_PER_P4D; i++)
 		kasan_zero_p4d[i] = __p4d(p4d_val);
 
-	kasan_map_early_shadow(early_level4_pgt);
-	kasan_map_early_shadow(init_level4_pgt);
+	kasan_map_early_shadow(early_level4_pgt,
+			       KASAN_SHADOW_START, KASAN_SHADOW_END);
+	kasan_map_early_shadow(init_level4_pgt,
+			       KASAN_SHADOW_START, KASAN_SHADOW_END);
+
+	kasan_early_init_pshadow();
+
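+	/* Map the early shadow for the per-page shadow range as well. */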
+	kasan_map_early_shadow(early_level4_pgt,
+			       KASAN_PSHADOW_START, KASAN_PSHADOW_END);
+	kasan_map_early_shadow(init_level4_pgt,
+			       KASAN_PSHADOW_START, KASAN_PSHADOW_END);
+
+	/*
+	 * Prepare black shadow memory: every level of the black page
+	 * tables points at a single read-only page, so the whole range
+	 * initially reads as poisoned.
+	 */
+	pte_val = __pa_nodebug(kasan_black_page) | __PAGE_KERNEL_RO;
+	pmd_val = __pa_nodebug(kasan_black_pte) | _KERNPG_TABLE;
+	pud_val = __pa_nodebug(kasan_black_pmd) | _KERNPG_TABLE;
+	p4d_val = __pa_nodebug(kasan_black_pud) | _KERNPG_TABLE;
+
+	for (i = 0; i < PTRS_PER_PTE; i++)
+		kasan_black_pte[i] = __pte(pte_val);
+
+	for (i = 0; i < PTRS_PER_PMD; i++)
+		kasan_black_pmd[i] = __pmd(pmd_val);
+
+	for (i = 0; i < PTRS_PER_PUD; i++)
+		kasan_black_pud[i] = __pud(pud_val);
+
+	for (i = 0; CONFIG_PGTABLE_LEVELS >= 5 && i < PTRS_PER_P4D; i++)
+		kasan_black_p4d[i] = __p4d(p4d_val);
 }
 
 void __init kasan_init(void)
@@ -127,26 +187,62 @@ void __init kasan_init(void)
 	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
-	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
-			kasan_mem_to_shadow((void *)PAGE_OFFSET));
+	kasan_populate_shadow((void *)KASAN_SHADOW_START,
+			      kasan_mem_to_shadow((void *)PAGE_OFFSET),
+			      true, false);
 
 	for (i = 0; i < E820_MAX_ENTRIES; i++) {
 		if (pfn_mapped[i].end == 0)
 			break;
 
-		if (map_range(&pfn_mapped[i]))
+		if (map_range(&pfn_mapped[i], false))
 			panic("kasan: unable to allocate shadow!");
 	}
-	kasan_populate_zero_shadow(
+	kasan_populate_shadow(
 		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
-		kasan_mem_to_shadow((void *)__START_KERNEL_map));
+		kasan_mem_to_shadow((void *)__START_KERNEL_map),
+		true, false);
 
 	vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
			(unsigned long)kasan_mem_to_shadow(_end),
			NUMA_NO_NODE);
 
-	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
-			(void *)KASAN_SHADOW_END);
+	kasan_populate_shadow(kasan_mem_to_shadow((void *)MODULES_END),
+			      (void *)KASAN_SHADOW_END,
+			      true, false);
+
+	/* Now do the same for the per-page shadow. */
+	clear_pgds(KASAN_PSHADOW_START, KASAN_PSHADOW_END);
+
+	kasan_populate_shadow((void *)KASAN_PSHADOW_START,
+			      kasan_mem_to_pshadow((void *)PAGE_OFFSET),
+			      true, false);
+
+	for (i = 0; i < E820_MAX_ENTRIES; i++) {
+		if (pfn_mapped[i].end == 0)
+			break;
+
+		if (map_range(&pfn_mapped[i], true))
+			panic("kasan: unable to allocate shadow!");
+	}
+	kasan_populate_shadow(
+		kasan_mem_to_pshadow((void *)PAGE_OFFSET + MAXMEM),
+		kasan_mem_to_pshadow((void *)__START_KERNEL_map),
+		true, false);
+
+	kasan_populate_shadow(
+		kasan_mem_to_pshadow(_stext),
+		kasan_mem_to_pshadow(_end),
+		false, false);
+
+	kasan_populate_shadow(
+		kasan_mem_to_pshadow((void *)MODULES_VADDR),
+		kasan_mem_to_pshadow((void *)MODULES_END),
+		false, false);
+
+	kasan_populate_shadow(kasan_mem_to_pshadow((void *)MODULES_END),
+			      (void *)KASAN_PSHADOW_END,
+			      true, false);
 
 	load_cr3(init_level4_pgt);
 	__flush_tlb_all();
 
@@ -167,3 +263,91 @@ void __init kasan_init(void)
 	init_task.kasan_depth = 0;
 	pr_info("KernelAddressSanitizer initialized\n");
 }
+
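+/*
+ * Walk the shadow range and back each page with freshly allocated
+ * memory. Assumes the intermediate page-table levels for the range
+ * already exist, so only the pte needs to be set.
+ */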
+static void __init kasan_map_shadow_late(unsigned long start,
+					 unsigned long end)
+{
+	unsigned long addr;
+	unsigned char *page;
+	pgd_t *pgd;
+	p4d_t *p4d;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *ptep;
+	pte_t pte;
+
+	for (addr = start; addr < end; addr += PAGE_SIZE) {
+		pgd = pgd_offset_k(addr);
+		p4d = p4d_offset(pgd, addr);
+		pud = pud_offset(p4d, addr);
+		pmd = pmd_offset(pud, addr);
+		ptep = pte_offset_kernel(pmd, addr);
+
+		page = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
+		pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);
+		set_pte_at(&init_mm, addr, ptep, pte);
+	}
+}
+
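+/*
+ * Back the shadow of [start, end) with real pages, then poison the
+ * range's per-page shadow.
+ */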
+static void __init __kasan_init_late(unsigned long start, unsigned long end)
+{
+	unsigned long shadow_start, shadow_end;
+
+	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start);
+	shadow_start = round_down(shadow_start, PAGE_SIZE);
+	shadow_end = (unsigned long)kasan_mem_to_shadow((void *)end);
+	shadow_end = ALIGN(shadow_end, PAGE_SIZE);
+
+	kasan_map_shadow_late(shadow_start, shadow_end);
+	kasan_poison_pshadow((void *)start, ALIGN(end, PAGE_SIZE) - start);
+}
+
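+/* Give the per-cpu IRQ and exception stacks byte-granular shadow. */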
+void __init kasan_init_late(void)
+{
+	int cpu;
+	unsigned long start, end;
+
+	for_each_possible_cpu(cpu) {
+		end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
+		start = end - IRQ_STACK_SIZE;
+
+		__kasan_init_late(start, end);
+
+		start = (unsigned long)per_cpu(exception_stacks, cpu);
+		end = start + sizeof(exception_stacks);
+
+		__kasan_init_late(start, end);
+	}
+}
+
+/*
+ * We cannot flush the TLBs of other cpus here due to deadlock, so just
+ * flush the TLB of the current cpu. Accessing a stale TLB entry can
+ * cause the following two problems, both of which we can handle:
+ *
+ * 1. Write protection fault: handled by the spurious fault handler,
+ *    which invalidates the stale TLB entry.
+ * 2. False positive in the KASAN shadow check: handled by re-checking
+ *    after flushing the local TLB.
+ */
+void arch_kasan_map_shadow(unsigned long s, unsigned long e)
+{
+	__flush_tlb_all();
+}
+
+bool arch_kasan_recheck_prepare(unsigned long addr, size_t size)
+{
+	__flush_tlb_all();
+
+	return true;
+}