OLD | NEW |
1 /* | 1 /* |
2 * linux/kernel/fork.c | 2 * linux/kernel/fork.c |
3 * | 3 * |
4 * Copyright (C) 1991, 1992 Linus Torvalds | 4 * Copyright (C) 1991, 1992 Linus Torvalds |
5 */ | 5 */ |
6 | 6 |
7 /* | 7 /* |
8 * 'fork.c' contains the help-routines for the 'fork' system call | 8 * 'fork.c' contains the help-routines for the 'fork' system call |
9 * (see also entry.S and others). | 9 * (see also entry.S and others). |
10 * Fork is rather simple, once you get the hang of it, but the memory | 10 * Fork is rather simple, once you get the hang of it, but the memory |
(...skipping 219 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
230 * free_thread_stack() can be called in interrupt context, | 230 * free_thread_stack() can be called in interrupt context, |
231 * so cache the vm_struct. | 231 * so cache the vm_struct. |
232 */ | 232 */ |
233 if (stack) | 233 if (stack) |
234 tsk->stack_vm_area = find_vm_area(stack); | 234 tsk->stack_vm_area = find_vm_area(stack); |
235 return stack; | 235 return stack; |
236 #else | 236 #else |
237 struct page *page = alloc_pages_node(node, THREADINFO_GFP, | 237 struct page *page = alloc_pages_node(node, THREADINFO_GFP, |
238 THREAD_SIZE_ORDER); | 238 THREAD_SIZE_ORDER); |
239 | 239 |
 | 240 if (page && kasan_stack_alloc(page_address(page), |
 | 241 PAGE_SIZE << THREAD_SIZE_ORDER)) { |
 | 242 __free_pages(page, THREAD_SIZE_ORDER); |
 | 243 page = NULL; |
 | 244 } |
| 245 |
240 return page ? page_address(page) : NULL; | 246 return page ? page_address(page) : NULL; |
241 #endif | 247 #endif |
242 } | 248 } |
243 | 249 |
244 static inline void free_thread_stack(struct task_struct *tsk) | 250 static inline void free_thread_stack(struct task_struct *tsk) |
245 { | 251 { |
246 #ifdef CONFIG_VMAP_STACK | 252 #ifdef CONFIG_VMAP_STACK |
247 if (task_stack_vm_area(tsk)) { | 253 if (task_stack_vm_area(tsk)) { |
248 unsigned long flags; | 254 unsigned long flags; |
249 int i; | 255 int i; |
250 | 256 |
251 local_irq_save(flags); | 257 local_irq_save(flags); |
252 for (i = 0; i < NR_CACHED_STACKS; i++) { | 258 for (i = 0; i < NR_CACHED_STACKS; i++) { |
253 if (this_cpu_read(cached_stacks[i])) | 259 if (this_cpu_read(cached_stacks[i])) |
254 continue; | 260 continue; |
255 | 261 |
256 this_cpu_write(cached_stacks[i], tsk->stack_vm_area); | 262 this_cpu_write(cached_stacks[i], tsk->stack_vm_area); |
257 local_irq_restore(flags); | 263 local_irq_restore(flags); |
258 return; | 264 return; |
259 } | 265 } |
260 local_irq_restore(flags); | 266 local_irq_restore(flags); |
261 | 267 |
262 vfree_atomic(tsk->stack); | 268 vfree_atomic(tsk->stack); |
263 return; | 269 return; |
264 } | 270 } |
265 #endif | 271 #endif |
266 | 272 |
| 273 kasan_stack_free(tsk->stack, PAGE_SIZE << THREAD_SIZE_ORDER); |
267 __free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER); | 274 __free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER); |
268 } | 275 } |
269 # else | 276 # else |
270 static struct kmem_cache *thread_stack_cache; | 277 static struct kmem_cache *thread_stack_cache; |
271 | 278 |
272 static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, | 279 static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, |
273 int node) | 280 int node) |
274 { | 281 { |
275 return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node); | 282 return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node); |
276 } | 283 } |
(...skipping 2172 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2449 t.extra2 = &max; | 2456 t.extra2 = &max; |
2450 | 2457 |
2451 ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); | 2458 ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); |
2452 if (ret || !write) | 2459 if (ret || !write) |
2453 return ret; | 2460 return ret; |
2454 | 2461 |
2455 set_max_threads(threads); | 2462 set_max_threads(threads); |
2456 | 2463 |
2457 return 0; | 2464 return 0; |
2458 } | 2465 } |
OLD | NEW |