OLD | NEW |
1 /* | 1 /* |
2 * SLUB: A slab allocator that limits cache line use instead of queuing | 2 * SLUB: A slab allocator that limits cache line use instead of queuing |
3 * objects in per cpu and per node lists. | 3 * objects in per cpu and per node lists. |
4 * | 4 * |
5 * The allocator synchronizes using per slab locks or atomic operations | 5 * The allocator synchronizes using per slab locks or atomic operations |
6 * and only uses a centralized lock to manage a pool of partial slabs. | 6 * and only uses a centralized lock to manage a pool of partial slabs. |
7 * | 7 * |
8 * (C) 2007 SGI, Christoph Lameter | 8 * (C) 2007 SGI, Christoph Lameter |
9 * (C) 2011 Linux Foundation, Christoph Lameter | 9 * (C) 2011 Linux Foundation, Christoph Lameter |
10 */ | 10 */ |
(...skipping 1391 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1402 struct page *page; | 1402 struct page *page; |
1403 int order = oo_order(oo); | 1403 int order = oo_order(oo); |
1404 | 1404 |
1405 flags |= __GFP_NOTRACK; | 1405 flags |= __GFP_NOTRACK; |
1406 | 1406 |
1407 if (node == NUMA_NO_NODE) | 1407 if (node == NUMA_NO_NODE) |
1408 page = alloc_pages(flags, order); | 1408 page = alloc_pages(flags, order); |
1409 else | 1409 else |
1410 page = __alloc_pages_node(node, flags, order); | 1410 page = __alloc_pages_node(node, flags, order); |
1411 | 1411 |
1412 » if (page && memcg_charge_slab(page, flags, order, s)) { | 1412 » if (kasan_slab_page_alloc(page ? page_address(page) : NULL, |
| 1413 » » » » PAGE_SIZE << order, flags)) { |
1413 __free_pages(page, order); | 1414 __free_pages(page, order); |
1414 page = NULL; | 1415 page = NULL; |
1415 } | 1416 } |
| 1417 |
| 1418 if (page && memcg_charge_slab(page, flags, order, s)) { |
| 1419 kasan_slab_page_free(page_address(page), PAGE_SIZE << order); |
| 1420 __free_pages(page, order); |
| 1421 page = NULL; |
| 1422 } |
1416 | 1423 |
1417 return page; | 1424 return page; |
1418 } | 1425 } |
1419 | 1426 |
1420 #ifdef CONFIG_SLAB_FREELIST_RANDOM | 1427 #ifdef CONFIG_SLAB_FREELIST_RANDOM |
1421 /* Pre-initialize the random sequence cache */ | 1428 /* Pre-initialize the random sequence cache */ |
1422 static int init_cache_random_seq(struct kmem_cache *s) | 1429 static int init_cache_random_seq(struct kmem_cache *s) |
1423 { | 1430 { |
1424 int err; | 1431 int err; |
1425 unsigned long i, count = oo_objects(s->oo); | 1432 unsigned long i, count = oo_objects(s->oo); |
(...skipping 234 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1660 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, | 1667 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, |
1661 -pages); | 1668 -pages); |
1662 | 1669 |
1663 __ClearPageSlabPfmemalloc(page); | 1670 __ClearPageSlabPfmemalloc(page); |
1664 __ClearPageSlab(page); | 1671 __ClearPageSlab(page); |
1665 | 1672 |
1666 page_mapcount_reset(page); | 1673 page_mapcount_reset(page); |
1667 if (current->reclaim_state) | 1674 if (current->reclaim_state) |
1668 current->reclaim_state->reclaimed_slab += pages; | 1675 current->reclaim_state->reclaimed_slab += pages; |
1669 memcg_uncharge_slab(page, order, s); | 1676 memcg_uncharge_slab(page, order, s); |
| 1677 kasan_slab_page_free(page_address(page), PAGE_SIZE << order); |
1670 __free_pages(page, order); | 1678 __free_pages(page, order); |
1671 } | 1679 } |
1672 | 1680 |
1673 #define need_reserve_slab_rcu \ | 1681 #define need_reserve_slab_rcu \ |
1674 (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head)) | 1682 (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head)) |
1675 | 1683 |
1676 static void rcu_free_slab(struct rcu_head *h) | 1684 static void rcu_free_slab(struct rcu_head *h) |
1677 { | 1685 { |
1678 struct page *page; | 1686 struct page *page; |
1679 | 1687 |
(...skipping 4146 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5826 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s) | 5834 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s) |
5827 { | 5835 { |
5828 } | 5836 } |
5829 | 5837 |
5830 ssize_t slabinfo_write(struct file *file, const char __user *buffer, | 5838 ssize_t slabinfo_write(struct file *file, const char __user *buffer, |
5831 size_t count, loff_t *ppos) | 5839 size_t count, loff_t *ppos) |
5832 { | 5840 { |
5833 return -EIO; | 5841 return -EIO; |
5834 } | 5842 } |
5835 #endif /* CONFIG_SLABINFO */ | 5843 #endif /* CONFIG_SLABINFO */ |
OLD | NEW |