OLD | NEW |
1 /* | 1 /* |
2 * This file contains shadow memory manipulation code. | 2 * This file contains shadow memory manipulation code. |
3 * | 3 * |
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd. | 4 * Copyright (c) 2014 Samsung Electronics Co., Ltd. |
5 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com> | 5 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com> |
6 * | 6 * |
7 * Some code borrowed from https://github.com/xairy/kasan-prototype by | 7 * Some code borrowed from https://github.com/xairy/kasan-prototype by |
8 * Andrey Konovalov <adech.fo@gmail.com> | 8 * Andrey Konovalov <adech.fo@gmail.com> |
9 * | 9 * |
10 * This program is free software; you can redistribute it and/or modify | 10 * This program is free software; you can redistribute it and/or modify |
(...skipping 18 matching lines...) |
29 #include <linux/module.h> | 29 #include <linux/module.h> |
30 #include <linux/printk.h> | 30 #include <linux/printk.h> |
31 #include <linux/sched.h> | 31 #include <linux/sched.h> |
32 #include <linux/sched/task_stack.h> | 32 #include <linux/sched/task_stack.h> |
33 #include <linux/slab.h> | 33 #include <linux/slab.h> |
34 #include <linux/stacktrace.h> | 34 #include <linux/stacktrace.h> |
35 #include <linux/string.h> | 35 #include <linux/string.h> |
36 #include <linux/types.h> | 36 #include <linux/types.h> |
37 #include <linux/vmalloc.h> | 37 #include <linux/vmalloc.h> |
38 #include <linux/bug.h> | 38 #include <linux/bug.h> |
| 39 #include <linux/page-isolation.h> |
| 40 #include <asm/cacheflush.h> |
| 41 #include <asm/tlbflush.h> |
| 42 #include <asm/sections.h> |
39 | 43 |
40 #include "kasan.h" | 44 #include "kasan.h" |
41 #include "../slab.h" | 45 #include "../slab.h" |
| 46 #include "../internal.h" |
| 47 |
| 48 static DEFINE_SPINLOCK(shadow_lock); |
| 49 static LIST_HEAD(unmap_list); |
| 50 static void kasan_unmap_shadow_workfn(struct work_struct *work); |
| 51 static DECLARE_WORK(kasan_unmap_shadow_work, kasan_unmap_shadow_workfn); |
42 | 52 |
43 void kasan_enable_current(void) | 53 void kasan_enable_current(void) |
44 { | 54 { |
45 current->kasan_depth++; | 55 current->kasan_depth++; |
46 } | 56 } |
47 | 57 |
48 void kasan_disable_current(void) | 58 void kasan_disable_current(void) |
49 { | 59 { |
50 current->kasan_depth--; | 60 current->kasan_depth--; |
51 } | 61 } |
(...skipping 57 matching lines...) |
109 void kasan_unpoison_stack_above_sp_to(const void *watermark) | 119 void kasan_unpoison_stack_above_sp_to(const void *watermark) |
110 { | 120 { |
111 const void *sp = __builtin_frame_address(0); | 121 const void *sp = __builtin_frame_address(0); |
112 size_t size = watermark - sp; | 122 size_t size = watermark - sp; |
113 | 123 |
114 if (WARN_ON(sp > watermark)) | 124 if (WARN_ON(sp > watermark)) |
115 return; | 125 return; |
116 kasan_unpoison_shadow(sp, size); | 126 kasan_unpoison_shadow(sp, size); |
117 } | 127 } |
118 | 128 |
| 129 static void kasan_mark_pshadow(const void *address, size_t size, u8 value) |
| 130 { |
| 131 void *pshadow_start; |
| 132 void *pshadow_end; |
| 133 |
| 134 if (!kasan_pshadow_inited()) |
| 135 return; |
| 136 |
| 137 pshadow_start = kasan_mem_to_pshadow(address); |
| 138 pshadow_end = kasan_mem_to_pshadow(address + size); |
| 139 |
| 140 memset(pshadow_start, value, pshadow_end - pshadow_start); |
| 141 } |
| 142 |
| 143 void kasan_poison_pshadow(const void *address, size_t size) |
| 144 { |
| 145 kasan_mark_pshadow(address, size, KASAN_PER_PAGE_BYPASS); |
| 146 } |
| 147 |
| 148 void kasan_unpoison_pshadow(const void *address, size_t size) |
| 149 { |
| 150 kasan_mark_pshadow(address, size, 0); |
| 151 } |
| 152 |
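The per-page shadow ("pshadow") handled above keeps one byte per page, so marking a range reduces to a memset over the span of pages it covers. A minimal userspace sketch of the same arithmetic, assuming 4 KB pages and a flat shadow array (mem_to_pshadow, PSHADOW_PAGES and the sizes are illustrative, not the patch's actual layout):

    #include <stdint.h>
    #include <string.h>

    #define PAGE_SHIFT    12               /* assumes 4 KB pages */
    #define PSHADOW_PAGES (1UL << 20)      /* toy 4 GB address space */

    static uint8_t pshadow[PSHADOW_PAGES]; /* one shadow byte per page */

    static uint8_t *mem_to_pshadow(uintptr_t addr)
    {
            return &pshadow[addr >> PAGE_SHIFT];
    }

    static void mark_pshadow(uintptr_t addr, size_t size, uint8_t value)
    {
            uint8_t *start = mem_to_pshadow(addr);
            uint8_t *end = mem_to_pshadow(addr + size); /* exclusive end */

            memset(start, value, end - start);
    }

Note the exclusive end pointer: as in kasan_mark_pshadow(), a tail page that the range only partially covers is left unmarked.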
| 153 static bool kasan_black_shadow(pte_t *ptep) |
| 154 { |
| 155 pte_t pte = *ptep; |
| 156 |
| 157 if (pte_none(pte)) |
| 158 return true; |
| 159 |
| 160 if (pte_pfn(pte) == kasan_black_page_pfn) |
| 161 return true; |
| 162 |
| 163 return false; |
| 164 } |
| 165 |
| 166 static int kasan_exist_shadow_pte(pte_t *ptep, pgtable_t token, |
| 167 unsigned long addr, void *data) |
| 168 { |
| 169 unsigned long *count = data; |
| 170 |
| 171 if (kasan_black_shadow(ptep)) |
| 172 return 0; |
| 173 |
| 174 (*count)++; |
| 175 return 0; |
| 176 } |
| 177 |
| 178 static int kasan_map_shadow_pte(pte_t *ptep, pgtable_t token, |
| 179 unsigned long addr, void *data) |
| 180 { |
| 181 pte_t pte; |
| 182 gfp_t gfp_flags = *(gfp_t *)data; |
| 183 struct page *page; |
| 184 unsigned long flags; |
| 185 |
| 186 if (!kasan_black_shadow(ptep)) |
| 187 return 0; |
| 188 |
| 189 page = alloc_page(gfp_flags); |
| 190 if (!page) |
| 191 return -ENOMEM; |
| 192 |
| 193 __memcpy(page_address(page), kasan_black_page, PAGE_SIZE); |
| 194 |
| 195 spin_lock_irqsave(&shadow_lock, flags); |
| 196 if (!kasan_black_shadow(ptep)) |
| 197 goto out; |
| 198 |
| 199 pte = mk_pte(page, PAGE_KERNEL); |
| 200 set_pte_at(&init_mm, addr, ptep, pte); |
| 201 page = NULL; |
| 202 |
| 203 out: |
| 204 spin_unlock_irqrestore(&shadow_lock, flags); |
| 205 if (page) |
| 206 __free_page(page); |
| 207 |
| 208 return 0; |
| 209 } |
| 210 |
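kasan_map_shadow_pte() allocates its backing page before taking shadow_lock, then rechecks the PTE under the lock and frees the page if another CPU already installed one. Allocate-outside, recheck-inside is the standard shape for populating a shared table without allocating under a spinlock; a generic userspace sketch of the same pattern (all names illustrative):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
    static void *slot;                  /* filled in at most once */

    static int install_slot(void)
    {
            void *page = malloc(4096);  /* allocate before locking */

            if (!page)
                    return -1;

            pthread_mutex_lock(&slot_lock);
            if (!slot) {
                    slot = page;        /* we won the race */
                    page = NULL;        /* ownership transferred */
            }
            pthread_mutex_unlock(&slot_lock);

            free(page);                 /* no-op unless we lost the race */
            return 0;
    }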
| 211 static int kasan_map_shadow(const void *addr, size_t size, gfp_t flags) |
| 212 { |
| 213 int err; |
| 214 unsigned long shadow_start, shadow_end; |
| 215 unsigned long count = 0; |
| 216 |
| 217 if (!kasan_pshadow_inited()) |
| 218 return 0; |
| 219 |
| 220 flags = flags & GFP_RECLAIM_MASK; |
| 221 shadow_start = (unsigned long)kasan_mem_to_shadow(addr); |
| 222 shadow_end = (unsigned long)kasan_mem_to_shadow(addr + size); |
| 223 shadow_start = round_down(shadow_start, PAGE_SIZE); |
| 224 shadow_end = ALIGN(shadow_end, PAGE_SIZE); |
| 225 |
| 226 err = apply_to_page_range(&init_mm, shadow_start, |
| 227 shadow_end - shadow_start, |
| 228 kasan_exist_shadow_pte, &count); |
| 229 if (err) { |
| 230 pr_err("checking shadow entry failed"); |
| 231 return err; |
| 232 } |
| 233 |
| 234 if (count == (shadow_end - shadow_start) / PAGE_SIZE) |
| 235 goto out; |
| 236 |
| 237 err = apply_to_page_range(&init_mm, shadow_start, |
| 238 shadow_end - shadow_start, |
| 239 kasan_map_shadow_pte, (void *)&flags); |
| 240 |
| 241 out: |
| 242 arch_kasan_map_shadow(shadow_start, shadow_end); |
| 243 flush_cache_vmap(shadow_start, shadow_end); |
| 244 if (err) |
| 245 pr_err("mapping shadow entry failed"); |
| 246 |
| 247 return err; |
| 248 } |
| 249 |
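The rounding in kasan_map_shadow() reflects the 1:8 scaling: a region's shadow occupies size >> KASAN_SHADOW_SCALE_SHIFT bytes, widened outward to whole pages before the PTE walk, so one 4 KB shadow page covers 32 KB of memory. A compilable arithmetic sketch, assuming the usual scale shift of 3 and 4 KB pages (the real kasan_mem_to_shadow() also adds KASAN_SHADOW_OFFSET, omitted here):

    #include <stdint.h>
    #include <stdio.h>

    #define SCALE_SHIFT 3       /* one shadow byte per 8 bytes of memory */
    #define PAGE_SIZE   4096UL

    int main(void)
    {
            uintptr_t addr = 0x40000000UL;  /* illustrative region */
            size_t size = 6 * PAGE_SIZE;

            uintptr_t shadow_start = addr >> SCALE_SHIFT;
            uintptr_t shadow_end = (addr + size) >> SCALE_SHIFT;

            shadow_start &= ~(PAGE_SIZE - 1);                   /* round_down */
            shadow_end = (shadow_end + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1); /* ALIGN */

            /* The 3 KB of shadow for this 24 KB region still fits in a
             * single 4 KB page after rounding: prints "shadow pages: 1".
             */
            printf("shadow pages: %lu\n",
                   (unsigned long)((shadow_end - shadow_start) / PAGE_SIZE));
            return 0;
    }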
| 250 static int kasan_unmap_shadow_pte(pte_t *ptep, pgtable_t token, |
| 251 unsigned long addr, void *data) |
| 252 { |
| 253 pte_t pte; |
| 254 struct page *page; |
| 255 struct list_head *list = data; |
| 256 |
| 257 if (kasan_black_shadow(ptep)) |
| 258 return 0; |
| 259 |
| 260 if (addr >= (unsigned long)_text && addr < (unsigned long)_end) |
| 261 return 0; |
| 262 |
| 263 pte = *ptep; |
| 264 page = pfn_to_page(pte_pfn(pte)); |
| 265 list_add(&page->lru, list); |
| 266 |
| 267 pte = pfn_pte(PFN_DOWN(__pa(kasan_black_page)), PAGE_KERNEL); |
| 268 pte = pte_wrprotect(pte); |
| 269 set_pte_at(&init_mm, addr, ptep, pte); |
| 270 |
| 271 return 0; |
| 272 } |
| 273 |
| 274 static void kasan_unmap_shadow_workfn(struct work_struct *work) |
| 275 { |
| 276 struct page *page, *next; |
| 277 LIST_HEAD(list); |
| 278 LIST_HEAD(shadow_list); |
| 279 unsigned long flags; |
| 280 unsigned int order; |
| 281 unsigned long shadow_addr, shadow_size; |
| 282 unsigned long tlb_start = ULONG_MAX, tlb_end = 0; |
| 283 int err; |
| 284 |
| 285 spin_lock_irqsave(&shadow_lock, flags); |
| 286 list_splice_init(&unmap_list, &list); |
| 287 spin_unlock_irqrestore(&shadow_lock, flags); |
| 288 |
| 289 if (list_empty(&list)) |
| 290 return; |
| 291 |
| 292 list_for_each_entry_safe(page, next, &list, lru) { |
| 293 order = page_private(page); |
| 294 post_alloc_hook(page, order, GFP_NOWAIT); |
| 295 set_page_private(page, order); |
| 296 |
| 297 shadow_addr = (unsigned long)kasan_mem_to_shadow( |
| 298 page_address(page)); |
| 299 shadow_size = PAGE_SIZE << (order - KASAN_SHADOW_SCALE_SHIFT); |
| 300 |
| 301 tlb_start = min(shadow_addr, tlb_start); |
| 302 tlb_end = max(shadow_addr + shadow_size, tlb_end); |
| 303 |
| 304 flush_cache_vunmap(shadow_addr, shadow_addr + shadow_size); |
| 305 err = apply_to_page_range(&init_mm, shadow_addr, shadow_size, |
| 306 kasan_unmap_shadow_pte, &shadow_list); |
| 307 if (err) { |
| 308 pr_err("invalid shadow entry found"); |
| 309 list_del(&page->lru); |
| 310 } |
| 311 } |
| 312 flush_tlb_kernel_range(tlb_start, tlb_end); |
| 313 |
| 314 list_for_each_entry_safe(page, next, &list, lru) { |
| 315 list_del(&page->lru); |
| 316 __free_pages(page, page_private(page)); |
| 317 } |
| 318 list_for_each_entry_safe(page, next, &shadow_list, lru) { |
| 319 list_del(&page->lru); |
| 320 __free_page(page); |
| 321 } |
| 322 } |
| 323 |
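Note how the work function batches TLB maintenance: it accumulates a single [tlb_start, tlb_end) envelope across every page in the batch and issues one flush_tlb_kernel_range() at the end, instead of flushing per page. The accumulation idiom in isolation (a sketch; the kernel code uses min()/max() directly):

    #include <limits.h>

    static unsigned long tlb_start = ULONG_MAX; /* empty-range sentinel */
    static unsigned long tlb_end;               /* 0 */

    static void account_range(unsigned long start, unsigned long size)
    {
            if (start < tlb_start)
                    tlb_start = start;
            if (start + size > tlb_end)
                    tlb_end = start + size;
    }

    /* After the loop, a single flush covers every span touched:
     * flush_tlb_kernel_range(tlb_start, tlb_end);
     */

The envelope may take in unrelated addresses sitting between the batched spans; that over-flush is the price of one ranged flush (and its IPIs) rather than one per page.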
| 324 static bool kasan_unmap_shadow(struct page *page, unsigned int order, |
| 325 unsigned int max_order) |
| 326 { |
| 327 int err; |
| 328 unsigned long shadow_addr, shadow_size; |
| 329 unsigned long count = 0; |
| 330 LIST_HEAD(list); |
| 331 unsigned long flags; |
| 332 struct zone *zone; |
| 333 int mt; |
| 334 |
| 335 if (order < KASAN_SHADOW_SCALE_SHIFT) |
| 336 return false; |
| 337 |
| 338 if (max_order != (KASAN_SHADOW_SCALE_SHIFT + 1)) |
| 339 return false; |
| 340 |
| 341 shadow_addr = (unsigned long)kasan_mem_to_shadow(page_address(page)); |
| 342 shadow_size = PAGE_SIZE << (order - KASAN_SHADOW_SCALE_SHIFT); |
| 343 err = apply_to_page_range(&init_mm, shadow_addr, shadow_size, |
| 344 kasan_exist_shadow_pte, &count); |
| 345 if (err) { |
| 346 pr_err("checking shadow entry failed"); |
| 347 return false; |
| 348 } |
| 349 |
| 350 if (!count) |
| 351 return false; |
| 352 |
| 353 zone = page_zone(page); |
| 354 mt = get_pageblock_migratetype(page); |
| 355 if (!is_migrate_isolate(mt)) |
| 356 __mod_zone_freepage_state(zone, -(1UL << order), mt); |
| 357 |
| 358 set_page_private(page, order); |
| 359 |
| 360 spin_lock_irqsave(&shadow_lock, flags); |
| 361 list_add(&page->lru, &unmap_list); |
| 362 spin_unlock_irqrestore(&shadow_lock, flags); |
| 363 |
| 364 schedule_work(&kasan_unmap_shadow_work); |
| 365 |
| 366 return true; |
| 367 } |
| 368 |
119 /* | 369 /* |
120 * All functions below are always inlined so the compiler can | 370 * All functions below are always inlined so the compiler can |
121 * perform better optimizations in each of __asan_loadX/__asan_storeX | 371 * perform better optimizations in each of __asan_loadX/__asan_storeX |
122 * depending on memory access size X. | 372 * depending on memory access size X. |
123 */ | 373 */ |
124 | 374 |
125 static __always_inline bool memory_is_poisoned_1(unsigned long addr) | 375 static __always_inline bool memory_is_poisoned_1(unsigned long addr) |
126 { | 376 { |
127 s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr); | 377 s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr); |
128 | 378 |
129 if (unlikely(shadow_value)) { | 379 if (unlikely(shadow_value)) { |
130 s8 last_accessible_byte = addr & KASAN_SHADOW_MASK; | 380 s8 last_accessible_byte = addr & KASAN_SHADOW_MASK; |
131 return unlikely(last_accessible_byte >= shadow_value); | 381 return unlikely(last_accessible_byte >= shadow_value); |
132 } | 382 } |
133 | 383 |
134 return false; | 384 return false; |
135 } | 385 } |
136 | 386 |
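memory_is_poisoned_1() encodes the standard KASAN shadow convention: a shadow byte of 0 means all eight bytes of the granule are addressable, a value of 1..7 means only the first N bytes are, and a negative value poisons the whole granule. A self-contained restatement of the test (SHADOW_MASK plays the role of KASAN_SHADOW_MASK):

    #include <stdbool.h>
    #include <stdint.h>

    #define SHADOW_MASK 7  /* KASAN_SHADOW_SCALE_SIZE - 1 at the usual scale */

    /* One shadow byte covers an 8-byte granule of real memory. */
    bool byte_is_poisoned(uintptr_t addr, int8_t shadow_value)
    {
            if (shadow_value == 0)
                    return false;       /* whole granule addressable */

            /* Offset of the accessed byte within its granule. */
            int8_t accessed_offset = addr & SHADOW_MASK;

            /* A negative (fully poisoned) shadow value always trips this. */
            return accessed_offset >= shadow_value;
    }

For example, with shadow_value 3 the granule's first three bytes (offsets 0..2) pass and offsets 3..7 report poison.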
137 static __always_inline bool memory_is_poisoned_2(unsigned long addr) | 387 static __always_inline bool memory_is_poisoned_2(unsigned long addr) |
138 { | 388 { |
139 » u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr); | 389 » if (unlikely(memory_is_poisoned_1(addr + 1))) |
| 390 » » return true; |
140 | 391 |
141 » if (unlikely(*shadow_addr)) { | 392 » /* |
142 » » if (memory_is_poisoned_1(addr + 1)) | 393 » * If single shadow byte covers 2-byte access, we don't |
143 » » » return true; | 394 » * need to do anything more. Otherwise, test the first |
| 395 » * shadow byte. |
| 396 » */ |
| 397 » if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0)) |
| 398 » » return false; |
144 | 399 |
145 » » /* | 400 » return memory_is_poisoned_1(addr); |
146 » » * If single shadow byte covers 2-byte access, we don't | |
147 » » * need to do anything more. Otherwise, test the first | |
148 » » * shadow byte. | |
149 » » */ | |
150 » » if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0)) | |
151 » » » return false; | |
152 | |
153 » » return unlikely(*(u8 *)shadow_addr); | |
154 » } | |
155 | |
156 » return false; | |
157 } | 401 } |
158 | 402 |
159 static __always_inline bool memory_is_poisoned_4(unsigned long addr) | 403 static __always_inline bool memory_is_poisoned_4(unsigned long addr) |
160 { | 404 { |
161 » u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr); | 405 » if (unlikely(memory_is_poisoned_1(addr + 3))) |
| 406 » » return true; |
162 | 407 |
163 » if (unlikely(*shadow_addr)) { | 408 » /* |
164 » » if (memory_is_poisoned_1(addr + 3)) | 409 » * If single shadow byte covers 4-byte access, we don't |
165 » » » return true; | 410 » * need to do anything more. Otherwise, test the first |
| 411 » * shadow byte. |
| 412 » */ |
| 413 » if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3)) |
| 414 » » return false; |
166 | 415 |
167 » » /* | 416 » return unlikely(*(u8 *)kasan_mem_to_shadow((void *)addr)); |
168 » » * If single shadow byte covers 4-byte access, we don't | |
169 » » * need to do anything more. Otherwise, test the first | |
170 » » * shadow byte. | |
171 » » */ | |
172 » » if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3)) | |
173 » » » return false; | |
174 | |
175 » » return unlikely(*(u8 *)shadow_addr); | |
176 » } | |
177 | |
178 » return false; | |
179 } | 417 } |
180 | 418 |
181 static __always_inline bool memory_is_poisoned_8(unsigned long addr) | 419 static __always_inline bool memory_is_poisoned_8(unsigned long addr) |
182 { | 420 { |
183 » u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr); | 421 » u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr); |
184 | 422 |
185 » if (unlikely(*shadow_addr)) { | 423 » if (unlikely(*shadow_addr)) |
186 » » if (memory_is_poisoned_1(addr + 7)) | 424 » » return true; |
187 » » » return true; | |
188 | 425 |
189 » » /* | 426 » /* |
190 » » * If single shadow byte covers 8-byte access, we don't | 427 » * If single shadow byte covers 8-byte access, we don't |
191 » » * need to do anything more. Otherwise, test the first | 428 » * need to do anything more. Otherwise, test the last |
192 » » * shadow byte. | 429 » * shadow byte. |
193 » » */ | 430 » */ |
194 » » if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE))) | 431 » if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE))) |
195 » » » return false; | 432 » » return false; |
196 | 433 |
197 » » return unlikely(*(u8 *)shadow_addr); | 434 » if (unlikely(memory_is_poisoned_1(addr + 7))) |
198 » } | 435 » » return true; |
199 | 436 |
200 return false; | 437 return false; |
201 } | 438 } |
202 | 439 |
203 static __always_inline bool memory_is_poisoned_16(unsigned long addr) | 440 static __always_inline bool memory_is_poisoned_16(unsigned long addr) |
204 { | 441 { |
205 » u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr); | 442 » u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr); |
206 | 443 |
207 » if (unlikely(*shadow_addr)) { | 444 » if (unlikely(*shadow_addr)) |
208 » » u16 shadow_first_bytes = *(u16 *)shadow_addr; | 445 » » return true; |
209 | 446 |
210 » » if (unlikely(shadow_first_bytes)) | 447 » /* |
211 » » » return true; | 448 » * If two shadow bytes covers 16-byte access, we don't |
| 449 » * need to do anything more. Otherwise, test the last |
| 450 » * shadow byte. |
| 451 » */ |
| 452 » if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE))) |
| 453 » » return false; |
212 | 454 |
213 » » /* | 455 » if (unlikely(memory_is_poisoned_1(addr + 15))) |
214 » » * If two shadow bytes covers 16-byte access, we don't | 456 » » return true; |
215 » » * need to do anything more. Otherwise, test the last | |
216 » » * shadow byte. | |
217 » » */ | |
218 » » if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE))) | |
219 » » » return false; | |
220 | |
221 » » return memory_is_poisoned_1(addr + 15); | |
222 » } | |
223 | 457 |
224 return false; | 458 return false; |
225 } | 459 } |
226 | 460 |
227 static __always_inline unsigned long bytes_is_zero(const u8 *start, | 461 static __always_inline unsigned long bytes_is_nonzero(const u8 *start, |
228 size_t size) | 462 size_t size) |
229 { | 463 { |
230 while (size) { | 464 while (size) { |
231 if (unlikely(*start)) | 465 if (unlikely(*start)) |
232 return (unsigned long)start; | 466 return (unsigned long)start; |
233 start++; | 467 start++; |
234 size--; | 468 size--; |
235 } | 469 } |
236 | 470 |
237 return 0; | 471 return 0; |
238 } | 472 } |
239 | 473 |
240 static __always_inline unsigned long memory_is_zero(const void *start, | 474 static __always_inline unsigned long memory_is_nonzero(const void *start, |
241 const void *end) | 475 const void *end) |
242 { | 476 { |
243 unsigned int words; | 477 unsigned int words; |
244 unsigned long ret; | 478 unsigned long ret; |
245 unsigned int prefix = (unsigned long)start % 8; | 479 unsigned int prefix = (unsigned long)start % 8; |
246 | 480 |
247 if (end - start <= 16) | 481 if (end - start <= 16) |
248 » » return bytes_is_zero(start, end - start); | 482 » » return bytes_is_nonzero(start, end - start); |
249 | 483 |
250 if (prefix) { | 484 if (prefix) { |
251 prefix = 8 - prefix; | 485 prefix = 8 - prefix; |
252 » » ret = bytes_is_zero(start, prefix); | 486 » » ret = bytes_is_nonzero(start, prefix); |
253 if (unlikely(ret)) | 487 if (unlikely(ret)) |
254 return ret; | 488 return ret; |
255 start += prefix; | 489 start += prefix; |
256 } | 490 } |
257 | 491 |
258 words = (end - start) / 8; | 492 words = (end - start) / 8; |
259 while (words) { | 493 while (words) { |
260 if (unlikely(*(u64 *)start)) | 494 if (unlikely(*(u64 *)start)) |
261 » » » return bytes_is_zero(start, 8); | 495 » » » return bytes_is_nonzero(start, 8); |
262 start += 8; | 496 start += 8; |
263 words--; | 497 words--; |
264 } | 498 } |
265 | 499 |
266 » return bytes_is_zero(start, (end - start) % 8); | 500 » return bytes_is_nonzero(start, (end - start) % 8); |
267 } | 501 } |
268 | 502 |
269 static __always_inline bool memory_is_poisoned_n(unsigned long addr, | 503 static __always_inline bool memory_is_poisoned_n(unsigned long addr, |
270 size_t size) | 504 size_t size) |
271 { | 505 { |
272 unsigned long ret; | 506 unsigned long ret; |
273 | 507 |
274 » ret = memory_is_zero(kasan_mem_to_shadow((void *)addr), | 508 » ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr), |
275 kasan_mem_to_shadow((void *)addr + size - 1) + 1); | 509 kasan_mem_to_shadow((void *)addr + size - 1) + 1); |
276 | 510 |
277 if (unlikely(ret)) { | 511 if (unlikely(ret)) { |
278 unsigned long last_byte = addr + size - 1; | 512 unsigned long last_byte = addr + size - 1; |
279 s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte); | 513 s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte); |
280 | 514 |
281 if (unlikely(ret != (unsigned long)last_shadow || | 515 if (unlikely(ret != (unsigned long)last_shadow || |
282 ((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow))) | 516 ((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow))) |
) |
283 return true; | 517 return true; |
284 } | 518 } |
285 return false; | 519 return false; |
286 } | 520 } |
287 | 521 |
| 522 static __always_inline u8 pshadow_val_builtin(unsigned long addr, size_t size) |
| 523 { |
| 524 u8 shadow_val = *(u8 *)kasan_mem_to_pshadow((void *)addr); |
| 525 |
| 526 if (shadow_val == KASAN_PER_PAGE_FREE) |
| 527 return shadow_val; |
| 528 |
| 529 if (likely(((addr + size - 1) & ~PAGE_MASK) >= (size - 1))) |
| 530 return shadow_val; |
| 531 |
| 532 if (shadow_val != *(u8 *)kasan_mem_to_pshadow((void *)addr + size - 1)) |
| 533 return KASAN_PER_PAGE_FREE; |
| 534 |
| 535 return shadow_val; |
| 536 } |
| 537 |
| 538 static __always_inline u8 pshadow_val_n(unsigned long addr, size_t size) |
| 539 { |
| 540 u8 *start, *end; |
| 541 u8 shadow_val; |
| 542 |
| 543 start = kasan_mem_to_pshadow((void *)addr); |
| 544 end = kasan_mem_to_pshadow((void *)addr + size - 1); |
| 545 size = end - start + 1; |
| 546 |
| 547 shadow_val = *start; |
| 548 if (shadow_val == KASAN_PER_PAGE_FREE) |
| 549 return shadow_val; |
| 550 |
| 551 while (size) { |
| 552 /* |
| 553 * Different shadow value means that access is over |
| 554 * the boundary. Report the error even if it is |
| 555 * in the valid area. |
| 556 */ |
| 557 if (shadow_val != *start) |
| 558 return KASAN_PER_PAGE_FREE; |
| 559 |
| 560 start++; |
| 561 size--; |
| 562 } |
| 563 |
| 564 return shadow_val; |
| 565 } |
| 566 |
| 567 static __always_inline u8 pshadow_val(unsigned long addr, size_t size) |
| 568 { |
| 569 if (!kasan_pshadow_inited()) |
| 570 return KASAN_PER_PAGE_BYPASS; |
| 571 |
| 572 if (__builtin_constant_p(size)) { |
| 573 switch (size) { |
| 574 case 1: |
| 575 case 2: |
| 576 case 4: |
| 577 case 8: |
| 578 case 16: |
| 579 return pshadow_val_builtin(addr, size); |
| 580 default: |
| 581 BUILD_BUG(); |
| 582 } |
| 583 } |
| 584 |
| 585 return pshadow_val_n(addr, size); |
| 586 } |
| 587 |
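pshadow_val_n() treats any change of per-page shadow value inside one access as an error: even when every page is individually valid, differing values mean the access spans an object boundary, so it returns KASAN_PER_PAGE_FREE. A toy illustration (the array contents and the 0xFE encoding are made up for the example):

    #include <stdint.h>
    #include <stdio.h>

    #define PER_PAGE_FREE 0xFE  /* illustrative, not the patch's value */

    /* Per-page shadow for four consecutive pages; page 2 was freed. */
    static const uint8_t pshadow[4] = { 0, 0, PER_PAGE_FREE, 0 };

    static uint8_t pshadow_val_n(unsigned int first, unsigned int last)
    {
            uint8_t val = pshadow[first];

            for (unsigned int i = first; i <= last; i++)
                    if (pshadow[i] != val)
                            return PER_PAGE_FREE;  /* crosses a boundary */
            return val;
    }

    int main(void)
    {
            printf("%#x\n", pshadow_val_n(0, 1)); /* 0: both pages agree */
            printf("%#x\n", pshadow_val_n(1, 2)); /* 0xfe: hits freed page */
            return 0;
    }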
288 static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size) | 588 static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size) |
289 { | 589 { |
290 if (__builtin_constant_p(size)) { | 590 if (__builtin_constant_p(size)) { |
291 switch (size) { | 591 switch (size) { |
292 case 1: | 592 case 1: |
293 return memory_is_poisoned_1(addr); | 593 return memory_is_poisoned_1(addr); |
294 case 2: | 594 case 2: |
295 return memory_is_poisoned_2(addr); | 595 return memory_is_poisoned_2(addr); |
296 case 4: | 596 case 4: |
297 return memory_is_poisoned_4(addr); | 597 return memory_is_poisoned_4(addr); |
298 case 8: | 598 case 8: |
299 return memory_is_poisoned_8(addr); | 599 return memory_is_poisoned_8(addr); |
300 case 16: | 600 case 16: |
301 return memory_is_poisoned_16(addr); | 601 return memory_is_poisoned_16(addr); |
302 default: | 602 default: |
303 BUILD_BUG(); | 603 BUILD_BUG(); |
304 } | 604 } |
305 } | 605 } |
306 | 606 |
307 return memory_is_poisoned_n(addr, size); | 607 return memory_is_poisoned_n(addr, size); |
308 } | 608 } |
309 | 609 |
| 610 static noinline void check_memory_region_slow(unsigned long addr, |
| 611 size_t size, bool write, |
| 612 unsigned long ret_ip) |
| 613 { |
| 614 preempt_disable(); |
| 615 if (!arch_kasan_recheck_prepare(addr, size)) |
| 616 goto report; |
| 617 |
| 618 if (!memory_is_poisoned(addr, size)) { |
| 619 preempt_enable(); |
| 620 return; |
| 621 } |
| 622 |
| 623 report: |
| 624 preempt_enable(); |
| 625 __kasan_report(addr, size, write, ret_ip); |
| 626 } |
| 627 |
310 static __always_inline void check_memory_region_inline(unsigned long addr, | 628 static __always_inline void check_memory_region_inline(unsigned long addr, |
311 size_t size, bool write, | 629 size_t size, bool write, |
312 unsigned long ret_ip) | 630 unsigned long ret_ip) |
313 { | 631 { |
314 if (unlikely(size == 0)) | 632 if (unlikely(size == 0)) |
315 return; | 633 return; |
316 | 634 |
317 if (unlikely((void *)addr < | 635 if (unlikely((void *)addr < |
318 kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) { | 636 kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) { |
319 » » kasan_report(addr, size, write, ret_ip); | 637 » » __kasan_report(addr, size, write, ret_ip); |
320 return; | 638 return; |
321 } | 639 } |
322 | 640 |
323 if (likely(!memory_is_poisoned(addr, size))) | 641 if (likely(!memory_is_poisoned(addr, size))) |
324 return; | 642 return; |
325 | 643 |
326 » kasan_report(addr, size, write, ret_ip); | 644 » if (!pshadow_val(addr, size)) |
| 645 » » return; |
| 646 |
| 647 » check_memory_region_slow(addr, size, write, ret_ip); |
327 } | 648 } |
328 | 649 |
329 static void check_memory_region(unsigned long addr, | 650 static void check_memory_region(unsigned long addr, |
330 size_t size, bool write, | 651 size_t size, bool write, |
331 unsigned long ret_ip) | 652 unsigned long ret_ip) |
332 { | 653 { |
333 check_memory_region_inline(addr, size, write, ret_ip); | 654 check_memory_region_inline(addr, size, write, ret_ip); |
334 } | 655 } |
335 | 656 |
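check_memory_region() is the funnel for the callbacks that compiler instrumentation inserts before every memory access. With outline instrumentation, a plain load conceptually turns into a call plus the access; a sketch of the shape (stubbed so it stands alone, not literal compiler output):

    /* Stub standing in for the kernel's __asan_load4(). */
    static void __asan_load4(unsigned long addr)
    {
            (void)addr;  /* the real one lands in check_memory_region() */
    }

    /* What "return *p;" conceptually compiles to under KASAN: */
    int instrumented_load(int *p)
    {
            __asan_load4((unsigned long)p);
            return *p;
    }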
336 void kasan_check_read(const void *p, unsigned int size) | 657 void kasan_check_read(const void *p, unsigned int size) |
(...skipping 27 matching lines...) |
364 | 685 |
365 #undef memcpy | 686 #undef memcpy |
366 void *memcpy(void *dest, const void *src, size_t len) | 687 void *memcpy(void *dest, const void *src, size_t len) |
367 { | 688 { |
368 check_memory_region((unsigned long)src, len, false, _RET_IP_); | 689 check_memory_region((unsigned long)src, len, false, _RET_IP_); |
369 check_memory_region((unsigned long)dest, len, true, _RET_IP_); | 690 check_memory_region((unsigned long)dest, len, true, _RET_IP_); |
370 | 691 |
371 return __memcpy(dest, src, len); | 692 return __memcpy(dest, src, len); |
372 } | 693 } |
373 | 694 |
| 695 void kasan_report(unsigned long addr, size_t size, |
| 696 bool is_write, unsigned long ip) |
| 697 { |
| 698 if (!pshadow_val(addr, size)) |
| 699 return; |
| 700 |
| 701 check_memory_region_slow(addr, size, is_write, ip); |
| 702 } |
| 703 |
374 void kasan_alloc_pages(struct page *page, unsigned int order) | 704 void kasan_alloc_pages(struct page *page, unsigned int order) |
375 { | 705 { |
376 » if (likely(!PageHighMem(page))) | 706 » if (likely(!PageHighMem(page))) { |
377 » » kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order); | 707 » » if (!kasan_pshadow_inited()) { |
| 708 » » » kasan_unpoison_shadow(page_address(page), |
| 709 » » » » » PAGE_SIZE << order); |
| 710 » » » return; |
| 711 » » } |
| 712 |
| 713 » » kasan_unpoison_pshadow(page_address(page), PAGE_SIZE << order); |
| 714 » } |
378 } | 715 } |
379 | 716 |
380 void kasan_free_pages(struct page *page, unsigned int order) | 717 void kasan_free_pages(struct page *page, unsigned int order) |
381 { | 718 { |
382 » if (likely(!PageHighMem(page))) | 719 » if (likely(!PageHighMem(page))) { |
383 » » kasan_poison_shadow(page_address(page), | 720 » » if (!kasan_pshadow_inited()) { |
384 » » » » PAGE_SIZE << order, | 721 » » » kasan_poison_shadow(page_address(page), |
385 » » » » KASAN_FREE_PAGE); | 722 » » » » » PAGE_SIZE << order, |
| 723 » » » » » KASAN_FREE_PAGE); |
| 724 » » » return; |
| 725 » » } |
| 726 |
| 727 » » kasan_mark_pshadow(page_address(page), |
| 728 » » » » » PAGE_SIZE << order, |
| 729 » » » » » KASAN_PER_PAGE_FREE); |
| 730 » } |
| 731 } |
| 732 |
| 733 bool kasan_free_buddy(struct page *page, unsigned int order, |
| 734 » » » unsigned int max_order) |
| 735 { |
| 736 » if (!kasan_pshadow_inited()) |
| 737 » » return false; |
| 738 |
| 739 » return kasan_unmap_shadow(page, order, max_order); |
386 } | 740 } |
387 | 741 |
388 /* | 742 /* |
389 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime. | 743 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime. |
390 * For larger allocations larger redzones are used. | 744 * For larger allocations larger redzones are used. |
391 */ | 745 */ |
392 static size_t optimal_redzone(size_t object_size) | 746 static size_t optimal_redzone(size_t object_size) |
393 { | 747 { |
394 int rz = | 748 int rz = |
395 object_size <= 64 - 16 ? 16 : | 749 object_size <= 64 - 16 ? 16 : |
(...skipping 215 matching lines...) |
611 | 965 |
612 kasan_unpoison_shadow(object, size); | 966 kasan_unpoison_shadow(object, size); |
613 kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start, | 967 kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start, |
614 KASAN_KMALLOC_REDZONE); | 968 KASAN_KMALLOC_REDZONE); |
615 | 969 |
616 if (cache->flags & SLAB_KASAN) | 970 if (cache->flags & SLAB_KASAN) |
617 set_track(&get_alloc_info(cache, object)->alloc_track, flags); | 971 set_track(&get_alloc_info(cache, object)->alloc_track, flags); |
618 } | 972 } |
619 EXPORT_SYMBOL(kasan_kmalloc); | 973 EXPORT_SYMBOL(kasan_kmalloc); |
620 | 974 |
621 void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags) | 975 int kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags) |
622 { | 976 { |
623 struct page *page; | 977 struct page *page; |
624 unsigned long redzone_start; | 978 unsigned long redzone_start; |
625 unsigned long redzone_end; | 979 unsigned long redzone_end; |
| 980 int err; |
626 | 981 |
627 if (gfpflags_allow_blocking(flags)) | 982 if (gfpflags_allow_blocking(flags)) |
628 quarantine_reduce(); | 983 quarantine_reduce(); |
629 | 984 |
630 if (unlikely(ptr == NULL)) | 985 if (unlikely(ptr == NULL)) |
631 » » return; | 986 » » return 0; |
632 | 987 |
633 page = virt_to_page(ptr); | 988 page = virt_to_page(ptr); |
| 989 err = kasan_slab_page_alloc(ptr, |
| 990 PAGE_SIZE << compound_order(page), flags); |
| 991 if (err) |
| 992 return err; |
| 993 |
634 redzone_start = round_up((unsigned long)(ptr + size), | 994 redzone_start = round_up((unsigned long)(ptr + size), |
635 KASAN_SHADOW_SCALE_SIZE); | 995 KASAN_SHADOW_SCALE_SIZE); |
636 redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page)); | 996 redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page)); |
637 | 997 |
638 kasan_unpoison_shadow(ptr, size); | 998 kasan_unpoison_shadow(ptr, size); |
639 kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start, | 999 kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start, |
640 KASAN_PAGE_REDZONE); | 1000 KASAN_PAGE_REDZONE); |
| 1001 |
| 1002 return 0; |
641 } | 1003 } |
642 | 1004 |
643 void kasan_krealloc(const void *object, size_t size, gfp_t flags) | 1005 void kasan_krealloc(const void *object, size_t size, gfp_t flags) |
644 { | 1006 { |
645 struct page *page; | 1007 struct page *page; |
646 | 1008 |
647 if (unlikely(object == ZERO_SIZE_PTR)) | 1009 if (unlikely(object == ZERO_SIZE_PTR)) |
648 return; | 1010 return; |
649 | 1011 |
650 page = virt_to_head_page(object); | 1012 page = virt_to_head_page(object); |
(...skipping 18 matching lines...) |
669 } | 1031 } |
670 | 1032 |
671 void kasan_kfree_large(const void *ptr) | 1033 void kasan_kfree_large(const void *ptr) |
672 { | 1034 { |
673 struct page *page = virt_to_page(ptr); | 1035 struct page *page = virt_to_page(ptr); |
674 | 1036 |
675 kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page), | 1037 kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page), |
676 KASAN_FREE_PAGE); | 1038 KASAN_FREE_PAGE); |
677 } | 1039 } |
678 | 1040 |
| 1041 int kasan_slab_page_alloc(const void *addr, size_t size, gfp_t flags) |
| 1042 { |
| 1043 int err; |
| 1044 |
| 1045 if (!kasan_pshadow_inited() || !addr) |
| 1046 return 0; |
| 1047 |
| 1048 err = kasan_map_shadow(addr, size, flags); |
| 1049 if (err) |
| 1050 return err; |
| 1051 |
| 1052 kasan_unpoison_shadow(addr, size); |
| 1053 kasan_poison_pshadow(addr, size); |
| 1054 |
| 1055 return 0; |
| 1056 } |
| 1057 |
| 1058 void kasan_slab_page_free(const void *addr, size_t size) |
| 1059 { |
| 1060 if (!kasan_pshadow_inited() || !addr) |
| 1061 return; |
| 1062 |
| 1063 kasan_poison_shadow(addr, size, KASAN_FREE_PAGE); |
| 1064 } |
| 1065 |
679 int kasan_module_alloc(void *addr, size_t size) | 1066 int kasan_module_alloc(void *addr, size_t size) |
680 { | 1067 { |
681 void *ret; | 1068 void *ret; |
682 size_t shadow_size; | 1069 size_t shadow_size; |
683 unsigned long shadow_start; | 1070 unsigned long shadow_start; |
684 | 1071 |
685 shadow_start = (unsigned long)kasan_mem_to_shadow(addr); | 1072 shadow_start = (unsigned long)kasan_mem_to_shadow(addr); |
686 shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT, | 1073 shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT, |
687 PAGE_SIZE); | 1074 PAGE_SIZE); |
688 | 1075 |
(...skipping 14 matching lines...) |
703 | 1090 |
704 return -ENOMEM; | 1091 return -ENOMEM; |
705 } | 1092 } |
706 | 1093 |
707 void kasan_free_shadow(const struct vm_struct *vm) | 1094 void kasan_free_shadow(const struct vm_struct *vm) |
708 { | 1095 { |
709 if (vm->flags & VM_KASAN) | 1096 if (vm->flags & VM_KASAN) |
710 vfree(kasan_mem_to_shadow(vm->addr)); | 1097 vfree(kasan_mem_to_shadow(vm->addr)); |
711 } | 1098 } |
712 | 1099 |
| 1100 int kasan_stack_alloc(const void *addr, size_t size) |
| 1101 { |
| 1102 int err; |
| 1103 |
| 1104 if (!kasan_pshadow_inited() || !addr) |
| 1105 return 0; |
| 1106 |
| 1107 err = kasan_map_shadow(addr, size, THREADINFO_GFP); |
| 1108 if (err) |
| 1109 return err; |
| 1110 |
| 1111 kasan_unpoison_shadow(addr, size); |
| 1112 kasan_poison_pshadow(addr, size); |
| 1113 |
| 1114 return 0; |
| 1115 } |
| 1116 |
| 1117 void kasan_stack_free(const void *addr, size_t size) |
| 1118 { |
| 1119 if (!kasan_pshadow_inited() || !addr) |
| 1120 return; |
| 1121 |
| 1122 kasan_poison_shadow(addr, size, KASAN_FREE_PAGE); |
| 1123 } |
| 1124 |
713 static void register_global(struct kasan_global *global) | 1125 static void register_global(struct kasan_global *global) |
714 { | 1126 { |
715 size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE); | 1127 size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE); |
716 | 1128 |
717 kasan_unpoison_shadow(global->beg, global->size); | 1129 kasan_unpoison_shadow(global->beg, global->size); |
718 | 1130 |
719 kasan_poison_shadow(global->beg + aligned_size, | 1131 kasan_poison_shadow(global->beg + aligned_size, |
720 global->size_with_redzone - aligned_size, | 1132 global->size_with_redzone - aligned_size, |
721 KASAN_GLOBAL_REDZONE); | 1133 KASAN_GLOBAL_REDZONE); |
722 } | 1134 } |
(...skipping 91 matching lines...) |
814 pr_info("WARNING: KASAN doesn't support memory hot-add\n"); | 1226 pr_info("WARNING: KASAN doesn't support memory hot-add\n"); |
815 pr_info("Memory hot-add will be disabled\n"); | 1227 pr_info("Memory hot-add will be disabled\n"); |
816 | 1228 |
817 hotplug_memory_notifier(kasan_mem_notifier, 0); | 1229 hotplug_memory_notifier(kasan_mem_notifier, 0); |
818 | 1230 |
819 return 0; | 1231 return 0; |
820 } | 1232 } |
821 | 1233 |
822 module_init(kasan_memhotplug_init); | 1234 module_init(kasan_memhotplug_init); |
823 #endif | 1235 #endif |