OLD | NEW |
1 /* | 1 /* |
2 * This file contains some kasan initialization code. | 2 * This file contains some kasan initialization code. |
3 * | 3 * |
4 * Copyright (c) 2015 Samsung Electronics Co., Ltd. | 4 * Copyright (c) 2015 Samsung Electronics Co., Ltd. |
5 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com> | 5 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com> |
6 * | 6 * |
7 * This program is free software; you can redistribute it and/or modify | 7 * This program is free software; you can redistribute it and/or modify |
8 * it under the terms of the GNU General Public License version 2 as | 8 * it under the terms of the GNU General Public License version 2 as |
9 * published by the Free Software Foundation. | 9 * published by the Free Software Foundation. |
10 * | 10 * |
11 */ | 11 */ |
12 | 12 |
13 #include <linux/bootmem.h> | 13 #include <linux/bootmem.h> |
14 #include <linux/init.h> | 14 #include <linux/init.h> |
15 #include <linux/kasan.h> | 15 #include <linux/kasan.h> |
16 #include <linux/kernel.h> | 16 #include <linux/kernel.h> |
17 #include <linux/memblock.h> | 17 #include <linux/memblock.h> |
18 #include <linux/mm.h> | 18 #include <linux/mm.h> |
19 #include <linux/pfn.h> | 19 #include <linux/pfn.h> |
| 20 #include <linux/vmalloc.h> |
20 | 21 |
21 #include <asm/page.h> | 22 #include <asm/page.h> |
22 #include <asm/pgalloc.h> | 23 #include <asm/pgalloc.h> |
23 | 24 |
| 25 #include "kasan.h" |
| 26 |
| 27 unsigned long kasan_pshadow_offset __read_mostly; |
| 28 unsigned long kasan_black_page_pfn __read_mostly; |
| 29 |
24 /* | 30 /* |
25 * This page serves two purposes: | 31 * This page serves two purposes: |
26 * - It used as early shadow memory. The entire shadow region populated | 32 * - It used as early shadow memory. The entire shadow region populated |
27 * with this page, before we will be able to setup normal shadow memory. | 33 * with this page, before we will be able to setup normal shadow memory. |
28 * - Latter it reused it as zero shadow to cover large ranges of memory | 34 * - Latter it reused it as zero shadow to cover large ranges of memory |
29 * that allowed to access, but not handled by kasan (vmalloc/vmemmap ...). | 35 * that allowed to access, but not handled by kasan (vmalloc/vmemmap ...). |
30 */ | 36 */ |
31 unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss; | 37 unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss; |
32 | 38 |
| 39 /* |
 | 40  * The shadow memory range to which this page is mapped will be considered |
 | 41  * to be checked later by another shadow memory. |
| 42 */ |
| 43 unsigned char kasan_black_page[PAGE_SIZE] __page_aligned_bss; |
| 44 |
33 #if CONFIG_PGTABLE_LEVELS > 4 | 45 #if CONFIG_PGTABLE_LEVELS > 4 |
34 p4d_t kasan_zero_p4d[PTRS_PER_P4D] __page_aligned_bss; | 46 p4d_t kasan_zero_p4d[PTRS_PER_P4D] __page_aligned_bss; |
| 47 p4d_t kasan_black_p4d[PTRS_PER_P4D] __page_aligned_bss; |
35 #endif | 48 #endif |
36 #if CONFIG_PGTABLE_LEVELS > 3 | 49 #if CONFIG_PGTABLE_LEVELS > 3 |
37 pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss; | 50 pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss; |
| 51 pud_t kasan_black_pud[PTRS_PER_PUD] __page_aligned_bss; |
38 #endif | 52 #endif |
39 #if CONFIG_PGTABLE_LEVELS > 2 | 53 #if CONFIG_PGTABLE_LEVELS > 2 |
40 pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss; | 54 pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss; |
| 55 pmd_t kasan_black_pmd[PTRS_PER_PMD] __page_aligned_bss; |
41 #endif | 56 #endif |
42 pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss; | 57 pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss; |
| 58 pte_t kasan_black_pte[PTRS_PER_PTE] __page_aligned_bss; |
43 | 59 |
44 static __init void *early_alloc(size_t size, int node) | 60 static __init void *early_alloc(size_t size, int node) |
45 { | 61 { |
46 return memblock_virt_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS), | 62 return memblock_virt_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS), |
47 BOOTMEM_ALLOC_ACCESSIBLE, node); | 63 BOOTMEM_ALLOC_ACCESSIBLE, node); |
48 } | 64 } |
49 | 65 |
50 static void __init zero_pte_populate(pmd_t *pmd, unsigned long addr, | 66 static void __init kasan_pte_populate(pmd_t *pmd, unsigned long addr, |
51 » » » » unsigned long end) | 67 » » » » unsigned long end, bool zero) |
52 { | 68 { |
53 » pte_t *pte = pte_offset_kernel(pmd, addr); | 69 » pte_t *ptep = pte_offset_kernel(pmd, addr); |
54 » pte_t zero_pte; | 70 » pte_t pte; |
| 71 » unsigned char *page; |
55 | 72 |
56 » zero_pte = pfn_pte(PFN_DOWN(__pa_symbol(kasan_zero_page)), PAGE_KERNEL); | 73 » pte = pfn_pte(PFN_DOWN(zero ? |
57 » zero_pte = pte_wrprotect(zero_pte); | 74 » » __pa_symbol(kasan_zero_page) : __pa_symbol(kasan_black_page)), |
| 75 » » PAGE_KERNEL); |
| 76 » pte = pte_wrprotect(pte); |
58 | 77 |
59 while (addr + PAGE_SIZE <= end) { | 78 while (addr + PAGE_SIZE <= end) { |
60 » » set_pte_at(&init_mm, addr, pte, zero_pte); | 79 » » set_pte_at(&init_mm, addr, ptep, pte); |
61 addr += PAGE_SIZE; | 80 addr += PAGE_SIZE; |
62 » » pte = pte_offset_kernel(pmd, addr); | 81 » » ptep = pte_offset_kernel(pmd, addr); |
63 } | 82 } |
| 83 |
| 84 if (addr == end) |
| 85 return; |
| 86 |
 | 87 	/* Populate the page for an unaligned end address */ |
| 88 page = early_alloc(PAGE_SIZE, NUMA_NO_NODE); |
| 89 if (!zero) |
| 90 __memcpy(page, kasan_black_page, end - addr); |
| 91 |
| 92 pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL); |
| 93 set_pte_at(&init_mm, addr, ptep, pte); |
64 } | 94 } |
65 | 95 |
66 static void __init zero_pmd_populate(pud_t *pud, unsigned long addr, | 96 static void __init kasan_pmd_populate(pud_t *pud, unsigned long addr, |
67 » » » » unsigned long end) | 97 » » » » unsigned long end, bool zero, bool private) |
68 { | 98 { |
69 pmd_t *pmd = pmd_offset(pud, addr); | 99 pmd_t *pmd = pmd_offset(pud, addr); |
70 unsigned long next; | 100 unsigned long next; |
71 | 101 |
72 do { | 102 do { |
73 next = pmd_addr_end(addr, end); | 103 next = pmd_addr_end(addr, end); |
74 | 104 |
75 » » if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) { | 105 » » if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE && |
76 » » » pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte)); | 106 » » » !private) {
| 107 » » » pmd_populate_kernel(&init_mm, pmd, |
| 108 » » » » zero ? lm_alias(kasan_zero_pte) : |
| 109 » » » » » lm_alias(kasan_black_pte)); |
77 continue; | 110 continue; |
78 } | 111 } |
79 | 112 |
80 if (pmd_none(*pmd)) { | 113 if (pmd_none(*pmd)) { |
81 pmd_populate_kernel(&init_mm, pmd, | 114 pmd_populate_kernel(&init_mm, pmd, |
82 early_alloc(PAGE_SIZE, NUMA_NO_NODE)); | 115 early_alloc(PAGE_SIZE, NUMA_NO_NODE)); |
83 } | 116 } |
84 » » zero_pte_populate(pmd, addr, next); | 117 |
| 118 » » kasan_pte_populate(pmd, addr, next, zero); |
85 } while (pmd++, addr = next, addr != end); | 119 } while (pmd++, addr = next, addr != end); |
86 } | 120 } |
87 | 121 |
88 static void __init zero_pud_populate(p4d_t *p4d, unsigned long addr, | 122 static void __init kasan_pud_populate(p4d_t *p4d, unsigned long addr, |
89 » » » » unsigned long end) | 123 » » » » unsigned long end, bool zero, bool private) |
90 { | 124 { |
91 pud_t *pud = pud_offset(p4d, addr); | 125 pud_t *pud = pud_offset(p4d, addr); |
92 unsigned long next; | 126 unsigned long next; |
93 | 127 |
94 do { | 128 do { |
95 next = pud_addr_end(addr, end); | 129 next = pud_addr_end(addr, end); |
96 » » if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) { | 130 » » if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE && |
| 131 » » » !private) { |
97 pmd_t *pmd; | 132 pmd_t *pmd; |
98 | 133 |
99 » » » pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd)); | 134 » » » pud_populate(&init_mm, pud, |
| 135 » » » » zero ? lm_alias(kasan_zero_pmd) : |
| 136 » » » » » lm_alias(kasan_black_pmd)); |
100 pmd = pmd_offset(pud, addr); | 137 pmd = pmd_offset(pud, addr); |
101 » » » pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte)); | 138 » » » pmd_populate_kernel(&init_mm, pmd,
| 139 » » » » zero ? lm_alias(kasan_zero_pte) : |
| 140 » » » » » lm_alias(kasan_black_pte)); |
102 continue; | 141 continue; |
103 } | 142 } |
104 | 143 |
105 if (pud_none(*pud)) { | 144 if (pud_none(*pud)) { |
106 pud_populate(&init_mm, pud, | 145 pud_populate(&init_mm, pud, |
107 early_alloc(PAGE_SIZE, NUMA_NO_NODE)); | 146 early_alloc(PAGE_SIZE, NUMA_NO_NODE)); |
108 } | 147 } |
109 » » zero_pmd_populate(pud, addr, next); | 148 » » kasan_pmd_populate(pud, addr, next, zero, private); |
110 } while (pud++, addr = next, addr != end); | 149 } while (pud++, addr = next, addr != end); |
111 } | 150 } |
112 | 151 |
113 static void __init zero_p4d_populate(pgd_t *pgd, unsigned long addr, | 152 static void __init kasan_p4d_populate(pgd_t *pgd, unsigned long addr, |
114 » » » » unsigned long end) | 153 » » » » unsigned long end, bool zero, bool private) |
115 { | 154 { |
116 p4d_t *p4d = p4d_offset(pgd, addr); | 155 p4d_t *p4d = p4d_offset(pgd, addr); |
117 unsigned long next; | 156 unsigned long next; |
118 | 157 |
119 do { | 158 do { |
120 next = p4d_addr_end(addr, end); | 159 next = p4d_addr_end(addr, end); |
121 » » if (IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) { | 160 » » if (IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE && |
| 161 » » » !private) { |
122 pud_t *pud; | 162 pud_t *pud; |
123 pmd_t *pmd; | 163 pmd_t *pmd; |
124 | 164 |
125 » » » p4d_populate(&init_mm, p4d, lm_alias(kasan_zero_pud)); | 165 » » » p4d_populate(&init_mm, p4d, |
| 166 » » » » zero ? lm_alias(kasan_zero_pud) : |
| 167 » » » » » lm_alias(kasan_black_pud)); |
126 pud = pud_offset(p4d, addr); | 168 pud = pud_offset(p4d, addr); |
127 » » » pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd)); | 169 » » » pud_populate(&init_mm, pud, |
| 170 » » » » zero ? lm_alias(kasan_zero_pmd) : |
| 171 » » » » » lm_alias(kasan_black_pmd)); |
128 pmd = pmd_offset(pud, addr); | 172 pmd = pmd_offset(pud, addr); |
129 pmd_populate_kernel(&init_mm, pmd, | 173 pmd_populate_kernel(&init_mm, pmd, |
130 » » » » » » lm_alias(kasan_zero_pte)); | 174 » » » » zero ? lm_alias(kasan_zero_pte) : |
| 175 » » » » » lm_alias(kasan_black_pte)); |
131 continue; | 176 continue; |
132 } | 177 } |
133 | 178 |
134 if (p4d_none(*p4d)) { | 179 if (p4d_none(*p4d)) { |
135 p4d_populate(&init_mm, p4d, | 180 p4d_populate(&init_mm, p4d, |
136 early_alloc(PAGE_SIZE, NUMA_NO_NODE)); | 181 early_alloc(PAGE_SIZE, NUMA_NO_NODE)); |
137 } | 182 } |
138 » » zero_pud_populate(p4d, addr, next); | 183 » » kasan_pud_populate(p4d, addr, next, zero, private); |
139 } while (p4d++, addr = next, addr != end); | 184 } while (p4d++, addr = next, addr != end); |
140 } | 185 } |
141 | 186 |
142 /** | 187 /** |
143 * kasan_populate_zero_shadow - populate shadow memory region with | 188 * kasan_populate_shadow - populate shadow memory region with |
144 * kasan_zero_page | 189 * kasan_(zero|black)_page |
145 * @shadow_start - start of the memory range to populate | 190 * @shadow_start - start of the memory range to populate |
146 * @shadow_end - end of the memory range to populate | 191 * @shadow_end - end of the memory range to populate |
 | 192  * @zero - type of shadow to populate: zero if true, black if false |
 | 193  * @private - force population of private shadow, except for the last page |
147 */ | 194 */ |
148 void __init kasan_populate_zero_shadow(const void *shadow_start, | 195 void __init kasan_populate_shadow(const void *shadow_start, |
149 » » » » const void *shadow_end) | 196 » » » » const void *shadow_end, |
| 197 » » » » bool zero, bool private) |
150 { | 198 { |
151 unsigned long addr = (unsigned long)shadow_start; | 199 unsigned long addr = (unsigned long)shadow_start; |
152 unsigned long end = (unsigned long)shadow_end; | 200 unsigned long end = (unsigned long)shadow_end; |
153 pgd_t *pgd = pgd_offset_k(addr); | 201 pgd_t *pgd = pgd_offset_k(addr); |
154 unsigned long next; | 202 unsigned long next; |
155 | 203 |
156 do { | 204 do { |
157 next = pgd_addr_end(addr, end); | 205 next = pgd_addr_end(addr, end); |
158 | 206 |
159 » » if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) { | 207 » » if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE && |
| 208 » » » !private) { |
160 p4d_t *p4d; | 209 p4d_t *p4d; |
161 pud_t *pud; | 210 pud_t *pud; |
162 pmd_t *pmd; | 211 pmd_t *pmd; |
163 | 212 |
164 /* | 213 /* |
165 * kasan_zero_pud should be populated with pmds | 214 * kasan_zero_pud should be populated with pmds |
166 * at this moment. | 215 * at this moment. |
167 * [pud,pmd]_populate*() below needed only for | 216 * [pud,pmd]_populate*() below needed only for |
168 * 3,2 - level page tables where we don't have | 217 * 3,2 - level page tables where we don't have |
169 * puds,pmds, so pgd_populate(), pud_populate() | 218 * puds,pmds, so pgd_populate(), pud_populate() |
170 * is noops. | 219 * is noops. |
171 * | 220 * |
172 * The ifndef is required to avoid build breakage. | 221 * The ifndef is required to avoid build breakage. |
173 * | 222 * |
174 * With 5level-fixup.h, pgd_populate() is not nop and | 223 * With 5level-fixup.h, pgd_populate() is not nop and |
175 * we reference kasan_zero_p4d. It's not defined | 224 * we reference kasan_zero_p4d. It's not defined |
176 * unless 5-level paging enabled. | 225 * unless 5-level paging enabled. |
177 * | 226 * |
178 * The ifndef can be dropped once all KASAN-enabled | 227 * The ifndef can be dropped once all KASAN-enabled |
179 * architectures will switch to pgtable-nop4d.h. | 228 * architectures will switch to pgtable-nop4d.h. |
180 */ | 229 */ |
181 #ifndef __ARCH_HAS_5LEVEL_HACK | 230 #ifndef __ARCH_HAS_5LEVEL_HACK |
182 » » » pgd_populate(&init_mm, pgd, lm_alias(kasan_zero_p4d)); | 231 » » » pgd_populate(&init_mm, pgd, |
| 232 » » » » zero ? lm_alias(kasan_zero_p4d) : |
| 233 » » » » » lm_alias(kasan_black_p4d)); |
183 #endif | 234 #endif |
184 p4d = p4d_offset(pgd, addr); | 235 p4d = p4d_offset(pgd, addr); |
185 » » » p4d_populate(&init_mm, p4d, lm_alias(kasan_zero_pud)); | 236 » » » p4d_populate(&init_mm, p4d, |
| 237 » » » » zero ? lm_alias(kasan_zero_pud) : |
| 238 » » » » » lm_alias(kasan_black_pud)); |
186 pud = pud_offset(p4d, addr); | 239 pud = pud_offset(p4d, addr); |
187 » » » pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd)); | 240 » » » pud_populate(&init_mm, pud, |
| 241 » » » » zero ? lm_alias(kasan_zero_pmd) : |
| 242 » » » » » lm_alias(kasan_black_pmd)); |
188 pmd = pmd_offset(pud, addr); | 243 pmd = pmd_offset(pud, addr); |
189 » » » pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte)); | 244 » » » pmd_populate_kernel(&init_mm, pmd,
| 245 » » » » zero ? lm_alias(kasan_zero_pte) : |
| 246 » » » » » lm_alias(kasan_black_pte)); |
190 continue; | 247 continue; |
191 } | 248 } |
192 | 249 |
193 if (pgd_none(*pgd)) { | 250 if (pgd_none(*pgd)) { |
194 pgd_populate(&init_mm, pgd, | 251 pgd_populate(&init_mm, pgd, |
195 early_alloc(PAGE_SIZE, NUMA_NO_NODE)); | 252 early_alloc(PAGE_SIZE, NUMA_NO_NODE)); |
196 } | 253 } |
197 » » zero_p4d_populate(pgd, addr, next); | 254 » » kasan_p4d_populate(pgd, addr, next, zero, private); |
198 } while (pgd++, addr = next, addr != end); | 255 } while (pgd++, addr = next, addr != end); |
199 } | 256 } |
| 257 |
| 258 void __init kasan_early_init_pshadow(void) |
| 259 { |
| 260 static struct vm_struct pshadow; |
| 261 unsigned long kernel_offset; |
| 262 int i; |
| 263 |
| 264 /* |
 | 265 	 * Temporarily map per-page shadow to per-byte shadow in order to |
| 266 * pass the KASAN checks in vm_area_register_early() |
| 267 */ |
| 268 kernel_offset = (unsigned long)kasan_shadow_to_mem( |
| 269 (void *)KASAN_SHADOW_START); |
| 270 kasan_pshadow_offset = KASAN_SHADOW_START - |
| 271 (kernel_offset >> PAGE_SHIFT); |
| 272 |
| 273 pshadow.size = KASAN_PSHADOW_SIZE; |
| 274 pshadow.flags = VM_ALLOC | VM_NO_GUARD; |
| 275 vm_area_register_early(&pshadow, |
| 276 (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)); |
| 277 |
| 278 kasan_pshadow_offset = (unsigned long)pshadow.addr - |
| 279 (kernel_offset >> PAGE_SHIFT); |
| 280 |
| 281 BUILD_BUG_ON(KASAN_FREE_PAGE != KASAN_PER_PAGE_BYPASS); |
| 282 kasan_black_page_pfn = PFN_DOWN(__pa(kasan_black_page)); |
| 283 for (i = 0; i < PAGE_SIZE; i++) |
| 284 kasan_black_page[i] = KASAN_FREE_PAGE; |
| 285 } |
OLD | NEW |
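
For reference, a minimal sketch of the per-page shadow translation that kasan_pshadow_offset above is computed for. This is an illustration under stated assumptions, not part of the patch: the helper name kasan_mem_to_pshadow and the one-shadow-byte-per-page scale are assumed here (the real lookup would live in kasan.h), and it only shows why the offset is the pshadow base minus (start of the covered range >> PAGE_SHIFT).

/*
 * Hypothetical sketch: with one shadow byte per page, the per-page shadow
 * of an address is its page index plus kasan_pshadow_offset, mirroring the
 * per-byte shadow mapping ((addr >> KASAN_SHADOW_SCALE_SHIFT) + offset).
 */
static inline void *kasan_mem_to_pshadow(const void *addr)
{
	return (void *)(((unsigned long)addr >> PAGE_SHIFT)
			+ kasan_pshadow_offset);
}

Under this reading, kasan_early_init_pshadow() first sets the offset so the per-page shadow aliases the start of the per-byte shadow (letting the KASAN checks in vm_area_register_early() pass), then rebases kasan_pshadow_offset onto the vm area it has just reserved.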