Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	215
1 file changed, 170 insertions(+), 45 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 5308227e65db..34394571e7b0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -261,7 +261,7 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
 	 * freepointer to be restored incorrectly.
 	 */
 	return (void *)((unsigned long)ptr ^ s->random ^
-			(unsigned long)kasan_reset_tag((void *)ptr_addr));
+			swab((unsigned long)kasan_reset_tag((void *)ptr_addr)));
 #else
 	return ptr;
 #endif
@@ -644,6 +644,20 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...)
 	va_end(args);
 }
 
+static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
+			       void **freelist, void *nextfree)
+{
+	if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
+	    !check_valid_pointer(s, page, nextfree) && freelist) {
+		object_err(s, page, *freelist, "Freechain corrupt");
+		*freelist = NULL;
+		slab_fix(s, "Isolate corrupted freechain");
+		return true;
+	}
+
+	return false;
+}
+
 static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 {
 	unsigned int off;	/* Offset of last byte */
@@ -1175,7 +1189,7 @@ static noinline int free_debug_processing(
 	unsigned long uninitialized_var(flags);
 	int ret = 0;
 
-	spin_lock_irqsave(&n->list_lock, flags);
+	raw_spin_lock_irqsave(&n->list_lock, flags);
 	slab_lock(page);
 
 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
@@ -1210,7 +1224,7 @@ out:
 			 bulk_cnt, cnt);
 
 	slab_unlock(page);
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	if (!ret)
 		slab_fix(s, "Object at 0x%p not freed", object);
 	return ret;
@@ -1376,8 +1390,19 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
 static inline void dec_slabs_node(struct kmem_cache *s, int node,
 							int objects) {}
 
+static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
+			       void **freelist, void *nextfree)
+{
+	return false;
+}
 #endif /* CONFIG_SLUB_DEBUG */
 
+struct slub_free_list {
+	raw_spinlock_t		lock;
+	struct list_head	list;
+};
+static DEFINE_PER_CPU(struct slub_free_list, slub_free_list);
+
 /*
  * Hooks for other subsystems that check memory allocations. In a typical
  * production configuration these hooks all should produce no code at all.
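
The first hunk above hardens the freelist pointer obfuscation: the stored freepointer becomes ptr ^ s->random ^ swab(ptr_addr). A free object's pointer and the address it is stored at share most of their upper bits, so the previous ptr ^ random ^ ptr_addr let those bits cancel out and exposed most of s->random to anyone able to read a few obfuscated freepointers. Byte-swapping the storage address moves its high-entropy low bits into the high positions before the XOR. A standalone sketch of the round-trip, assuming a 64-bit GCC/Clang build; swab64() stands in for the kernel's swab() and all the values are made up:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's swab() on a 64-bit unsigned long. */
static uint64_t swab64(uint64_t x)
{
	return __builtin_bswap64(x);
}

/* Encode and decode are the same XOR mask, as in freelist_ptr(), so
 * applying the function twice restores the original pointer. */
static uint64_t freelist_obfuscate(uint64_t ptr, uint64_t random,
				   uint64_t ptr_addr)
{
	return ptr ^ random ^ swab64(ptr_addr);
}

int main(void)
{
	uint64_t random   = 0x8c17f2a9d4e3b061ULL;	/* per-cache secret */
	uint64_t ptr      = 0xffff888004030200ULL;	/* next free object */
	uint64_t ptr_addr = 0xffff888004030100ULL;	/* where it is stored */

	uint64_t stored  = freelist_obfuscate(ptr, random, ptr_addr);
	uint64_t decoded = freelist_obfuscate(stored, random, ptr_addr);

	/* Without swab(), ptr ^ ptr_addr differs only in the low bits,
	 * so 'stored' would leak the upper bits of 'random'. */
	printf("stored  %016llx\n", (unsigned long long)stored);
	printf("decoded %016llx (matches ptr: %d)\n",
	       (unsigned long long)decoded, decoded == ptr);
	return 0;
}

The symmetry is why a single freelist_ptr() helper can serve both set_freepointer() and get_freepointer().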
@@ -1615,10 +1640,18 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	void *start, *p, *next;
 	int idx, order;
 	bool shuffle;
+	bool enableirqs = false;
 
 	flags &= gfp_allowed_mask;
 
 	if (gfpflags_allow_blocking(flags))
+		enableirqs = true;
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (system_state > SYSTEM_BOOTING)
+		enableirqs = true;
+#endif
+	if (enableirqs)
 		local_irq_enable();
 
 	flags |= s->allocflags;
@@ -1678,7 +1711,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	page->frozen = 1;
 
 out:
-	if (gfpflags_allow_blocking(flags))
+	if (enableirqs)
 		local_irq_disable();
 	if (!page)
 		return NULL;
@@ -1736,6 +1769,16 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	__free_pages(page, order);
 }
 
+static void free_delayed(struct list_head *h)
+{
+	while (!list_empty(h)) {
+		struct page *page = list_first_entry(h, struct page, lru);
+
+		list_del(&page->lru);
+		__free_slab(page->slab_cache, page);
+	}
+}
+
 static void rcu_free_slab(struct rcu_head *h)
 {
 	struct page *page = container_of(h, struct page, rcu_head);
@@ -1747,6 +1790,12 @@ static void free_slab(struct kmem_cache *s, struct page *page)
 {
 	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
 		call_rcu(&page->rcu_head, rcu_free_slab);
+	} else if (irqs_disabled()) {
+		struct slub_free_list *f = this_cpu_ptr(&slub_free_list);
+
+		raw_spin_lock(&f->lock);
+		list_add(&page->lru, &f->list);
+		raw_spin_unlock(&f->lock);
 	} else
 		__free_slab(s, page);
 }
@@ -1854,7 +1903,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 	if (!n || !n->nr_partial)
 		return NULL;
 
-	spin_lock(&n->list_lock);
+	raw_spin_lock(&n->list_lock);
 	list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
 		void *t;
 
@@ -1879,7 +1928,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 			break;
 
 	}
-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);
 	return object;
 }
@@ -1958,8 +2007,6 @@ static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
 
 	if (node == NUMA_NO_NODE)
 		searchnode = numa_mem_id();
-	else if (!node_present_pages(node))
-		searchnode = node_to_mem_node(node);
 
 	object = get_partial_node(s, get_node(s, searchnode), c, flags);
 	if (object || node != NUMA_NO_NODE)
@@ -2067,6 +2114,14 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
 		void *prior;
 		unsigned long counters;
 
+		/*
+		 * If 'nextfree' is invalid, it is possible that the object at
+		 * 'freelist' is already corrupted. So isolate all objects
+		 * starting at 'freelist'.
+		 */
+		if (freelist_corrupted(s, page, &freelist, nextfree))
+			break;
+
 		do {
 			prior = page->freelist;
 			counters = page->counters;
@@ -2125,7 +2180,7 @@ redo:
 			 * that acquire_slab() will see a slab page that
 			 * is frozen
 			 */
-			spin_lock(&n->list_lock);
+			raw_spin_lock(&n->list_lock);
 		}
 	} else {
 		m = M_FULL;
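
Several hunks above add the deferred-free machinery this series relies on for PREEMPT_RT: __free_slab() ends up in the page allocator, whose locks may sleep on RT, so free_slab() parks the page on the per-CPU slub_free_list whenever interrupts are disabled, and free_delayed() later walks that list from a context that is allowed to sleep. A rough userspace analogy of the scheme, assuming nothing kernel-specific; free_page_maybe_deferred(), drain_deferred() and the single global list (rather than a per-CPU list under a raw spinlock) are simplifications invented for the sketch:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct deferred {
	struct deferred	*next;
	void		*page;
};

static struct deferred *deferred_head;	/* stands in for slub_free_list.list */
static bool irqs_disabled_now;		/* stands in for irqs_disabled() */

static void really_free(void *page)
{
	printf("freeing %p\n", page);
	free(page);
}

static void free_page_maybe_deferred(void *page)
{
	if (irqs_disabled_now) {
		/* Cannot free from here: queue the page, like
		 * list_add(&page->lru, &f->list) in free_slab(). */
		struct deferred *d = malloc(sizeof(*d));

		d->page = page;
		d->next = deferred_head;
		deferred_head = d;
	} else {
		really_free(page);
	}
}

static void drain_deferred(void)	/* analogue of free_delayed() */
{
	while (deferred_head) {
		struct deferred *d = deferred_head;

		deferred_head = d->next;
		really_free(d->page);
		free(d);
	}
}

int main(void)
{
	irqs_disabled_now = true;
	free_page_maybe_deferred(malloc(64));	/* queued */
	free_page_maybe_deferred(malloc(64));	/* queued */
	irqs_disabled_now = false;
	drain_deferred();			/* both freed here */
	return 0;
}

The later hunks in put_cpu_partial(), flush_all() and ___slab_alloc() are the corresponding drain points: they splice the per-CPU list out under the raw lock while interrupts are still off, then call free_delayed() once interrupts are enabled again.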
@@ -2136,7 +2191,7 @@ redo:
 			 * slabs from diagnostic functions will not see
 			 * any frozen slabs.
 			 */
-			spin_lock(&n->list_lock);
+			raw_spin_lock(&n->list_lock);
 		}
 	}
 
@@ -2160,7 +2215,7 @@ redo:
 		goto redo;
 
 	if (lock)
-		spin_unlock(&n->list_lock);
+		raw_spin_unlock(&n->list_lock);
 
 	if (m == M_PARTIAL)
 		stat(s, tail);
@@ -2199,10 +2254,10 @@ static void unfreeze_partials(struct kmem_cache *s,
 		n2 = get_node(s, page_to_nid(page));
 		if (n != n2) {
 			if (n)
-				spin_unlock(&n->list_lock);
+				raw_spin_unlock(&n->list_lock);
 
 			n = n2;
-			spin_lock(&n->list_lock);
+			raw_spin_lock(&n->list_lock);
 		}
 
 		do {
@@ -2231,7 +2286,7 @@ static void unfreeze_partials(struct kmem_cache *s,
 	}
 
 	if (n)
-		spin_unlock(&n->list_lock);
+		raw_spin_unlock(&n->list_lock);
 
 	while (discard_page) {
 		page = discard_page;
@@ -2268,14 +2323,21 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 			pobjects = oldpage->pobjects;
 			pages = oldpage->pages;
 			if (drain && pobjects > s->cpu_partial) {
+				struct slub_free_list *f;
 				unsigned long flags;
+				LIST_HEAD(tofree);
 				/*
 				 * partial array is full. Move the existing
 				 * set to the per node partial list.
 				 */
 				local_irq_save(flags);
 				unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
+				f = this_cpu_ptr(&slub_free_list);
+				raw_spin_lock(&f->lock);
+				list_splice_init(&f->list, &tofree);
+				raw_spin_unlock(&f->lock);
 				local_irq_restore(flags);
+				free_delayed(&tofree);
 				oldpage = NULL;
 				pobjects = 0;
 				pages = 0;
@@ -2343,7 +2405,19 @@ static bool has_cpu_slab(int cpu, void *info)
 
 static void flush_all(struct kmem_cache *s)
 {
+	LIST_HEAD(tofree);
+	int cpu;
+
 	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
+	for_each_online_cpu(cpu) {
+		struct slub_free_list *f;
+
+		f = &per_cpu(slub_free_list, cpu);
+		raw_spin_lock_irq(&f->lock);
+		list_splice_init(&f->list, &tofree);
+		raw_spin_unlock_irq(&f->lock);
+		free_delayed(&tofree);
+	}
 }
 
 /*
@@ -2398,10 +2472,10 @@ static unsigned long count_partial(struct kmem_cache_node *n,
 	unsigned long x = 0;
 	struct page *page;
 
-	spin_lock_irqsave(&n->list_lock, flags);
+	raw_spin_lock_irqsave(&n->list_lock, flags);
 	list_for_each_entry(page, &n->partial, slab_list)
 		x += get_count(page);
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	return x;
 }
 #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
@@ -2540,23 +2614,35 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
  * already disabled (which is the case for bulk allocation).
  */
 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
-			  unsigned long addr, struct kmem_cache_cpu *c)
+			  unsigned long addr, struct kmem_cache_cpu *c,
+			  struct list_head *to_free)
 {
+	struct slub_free_list *f;
 	void *freelist;
 	struct page *page;
 
 	page = c->page;
-	if (!page)
+	if (!page) {
+		/*
+		 * if the node is not online or has no normal memory, just
+		 * ignore the node constraint
+		 */
+		if (unlikely(node != NUMA_NO_NODE &&
+			     !node_state(node, N_NORMAL_MEMORY)))
+			node = NUMA_NO_NODE;
 		goto new_slab;
+	}
 redo:
 
 	if (unlikely(!node_match(page, node))) {
-		int searchnode = node;
-
-		if (node != NUMA_NO_NODE && !node_present_pages(node))
-			searchnode = node_to_mem_node(node);
-
-		if (unlikely(!node_match(page, searchnode))) {
+		/*
+		 * same as above but node_match() being false already
+		 * implies node != NUMA_NO_NODE
+		 */
+		if (!node_state(node, N_NORMAL_MEMORY)) {
+			node = NUMA_NO_NODE;
+			goto redo;
+		} else {
 			stat(s, ALLOC_NODE_MISMATCH);
 			deactivate_slab(s, page, c->freelist, c);
 			goto new_slab;
@@ -2597,6 +2683,13 @@ load_freelist:
 	VM_BUG_ON(!c->page->frozen);
 	c->freelist = get_freepointer(s, freelist);
 	c->tid = next_tid(c->tid);
+
+out:
+	f = this_cpu_ptr(&slub_free_list);
+	raw_spin_lock(&f->lock);
+	list_splice_init(&f->list, to_free);
+	raw_spin_unlock(&f->lock);
+
 	return freelist;
 
 new_slab:
@@ -2612,7 +2705,7 @@ new_slab:
 
 	if (unlikely(!freelist)) {
 		slab_out_of_memory(s, gfpflags, node);
-		return NULL;
+		goto out;
 	}
 
 	page = c->page;
@@ -2625,7 +2718,7 @@ new_slab:
 		goto new_slab;	/* Slab failed checks. Next slab needed */
 
 	deactivate_slab(s, page, get_freepointer(s, freelist), c);
-	return freelist;
+	goto out;
 }
 
 /*
@@ -2637,6 +2730,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 {
 	void *p;
 	unsigned long flags;
+	LIST_HEAD(tofree);
 
 	local_irq_save(flags);
 #ifdef CONFIG_PREEMPT
@@ -2648,8 +2742,9 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	c = this_cpu_ptr(s->cpu_slab);
 #endif
 
-	p = ___slab_alloc(s, gfpflags, node, addr, c);
+	p = ___slab_alloc(s, gfpflags, node, addr, c, &tofree);
 	local_irq_restore(flags);
+	free_delayed(&tofree);
 	return p;
 }
 
@@ -2835,7 +2930,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
 	do {
 		if (unlikely(n)) {
-			spin_unlock_irqrestore(&n->list_lock, flags);
+			raw_spin_unlock_irqrestore(&n->list_lock, flags);
 			n = NULL;
 		}
 		prior = page->freelist;
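
The ___slab_alloc() hunks above replace the old node_to_mem_node() redirection (removed from get_partial() earlier) with a simpler rule: a node constraint that can never be satisfied, because the node is offline or has no normal memory, is dropped to NUMA_NO_NODE up front instead of being retried forever. A minimal sketch of that decision, with node_has_normal_memory() as a hypothetical stand-in for the kernel's node_state(node, N_NORMAL_MEMORY):

#include <stdbool.h>
#include <stdio.h>

#define NUMA_NO_NODE (-1)

/* Hypothetical: pretend only node 0 has normal (allocatable) memory. */
static bool node_has_normal_memory(int node)
{
	return node == 0;
}

static int resolve_alloc_node(int node)
{
	/* A constraint on a memoryless or offline node can never be
	 * met, so fall back to "any node". */
	if (node != NUMA_NO_NODE && !node_has_normal_memory(node))
		return NUMA_NO_NODE;
	return node;
}

int main(void)
{
	printf("node 0 -> %d\n", resolve_alloc_node(0));	/* stays 0 */
	printf("node 1 -> %d\n", resolve_alloc_node(1));	/* falls back to -1 */
	return 0;
}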
@@ -2867,7 +2962,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 				 * Otherwise the list_lock will synchronize with
 				 * other processors updating the list of slabs.
 				 */
-				spin_lock_irqsave(&n->list_lock, flags);
+				raw_spin_lock_irqsave(&n->list_lock, flags);
 
 			}
 		}
@@ -2908,7 +3003,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		add_partial(n, page, DEACTIVATE_TO_TAIL);
 		stat(s, FREE_ADD_PARTIAL);
 	}
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	return;
 
 slab_empty:
@@ -2923,7 +3018,7 @@ slab_empty:
 		remove_full(s, n, page);
 	}
 
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	stat(s, FREE_SLAB);
 	discard_slab(s, page);
 }
@@ -2967,11 +3062,13 @@ redo:
 	barrier();
 
 	if (likely(page == c->page)) {
-		set_freepointer(s, tail_obj, c->freelist);
+		void **freelist = READ_ONCE(c->freelist);
+
+		set_freepointer(s, tail_obj, freelist);
 
 		if (unlikely(!this_cpu_cmpxchg_double(
 				s->cpu_slab->freelist, s->cpu_slab->tid,
-				c->freelist, tid,
+				freelist, tid,
 				head, next_tid(tid)))) {
 
 			note_cmpxchg_failure("slab_free", s, tid);
@@ -3126,6 +3223,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			  void **p)
 {
 	struct kmem_cache_cpu *c;
+	LIST_HEAD(to_free);
 	int i;
 
 	/* memcg and kmem_cache debug support */
@@ -3145,11 +3243,20 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 
 		if (unlikely(!object)) {
 			/*
+			 * We may have removed an object from c->freelist using
+			 * the fastpath in the previous iteration; in that case,
+			 * c->tid has not been bumped yet.
+			 * Since ___slab_alloc() may reenable interrupts while
+			 * allocating memory, we should bump c->tid now.
+			 */
+			c->tid = next_tid(c->tid);
+
+			/*
 			 * Invoking slow path likely have side-effect
 			 * of re-populating per CPU c->freelist
 			 */
 			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
-					    _RET_IP_, c);
+					    _RET_IP_, c, &to_free);
 			if (unlikely(!p[i]))
 				goto error;
 
@@ -3161,6 +3268,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	}
 	c->tid = next_tid(c->tid);
 	local_irq_enable();
+	free_delayed(&to_free);
 
 	/* Clear memory outside IRQ disabled fastpath loop */
 	if (unlikely(flags & __GFP_ZERO)) {
@@ -3175,6 +3283,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	return i;
 error:
 	local_irq_enable();
+	free_delayed(&to_free);
 	slab_post_alloc_hook(s, flags, i, p);
 	__kmem_cache_free_bulk(s, i, p);
 	return 0;
@@ -3310,7 +3419,7 @@ static void
 init_kmem_cache_node(struct kmem_cache_node *n)
 {
 	n->nr_partial = 0;
-	spin_lock_init(&n->list_lock);
+	raw_spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
 	atomic_long_set(&n->nr_slabs, 0);
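
The do_slab_free() hunk above fixes a subtle fastpath race: c->freelist used to be read twice, once for set_freepointer() and once as the expected value of this_cpu_cmpxchg_double(), and an interrupt between the two reads could make the freepointer actually written diverge from the value the cmpxchg checks. Reading it once via READ_ONCE() keeps the pair consistent, and the tid bumped on every transaction makes a stale snapshot fail the cmpxchg even when the same pointer value reappears (the ABA problem); this is also why the bulk-allocation hunk bumps c->tid before calling into the interrupt-enabling slow path. A C11 sketch of the protocol, not the kernel implementation: objects are pool indices, and the head index plus a tid tag are packed into one 64-bit atomic instead of using cmpxchg_double; the demo is single-threaded, so the plain next_idx[] accesses are safe:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NOBJ 8
#define NONE 0xffffffffu

static uint32_t next_idx[NOBJ];		/* per-object "freepointer" */
static _Atomic uint64_t head_tid;	/* high 32: head index, low 32: tid */

static uint64_t pack(uint32_t head, uint32_t tid)
{
	return ((uint64_t)head << 32) | tid;
}

static void push(uint32_t obj)		/* analogue of the free fastpath */
{
	uint64_t old = atomic_load(&head_tid);

	do {
		/* Take the head from the same snapshot the CAS will check
		 * (the READ_ONCE() fix): the freepointer we store must match
		 * the head value we compare against. */
		next_idx[obj] = (uint32_t)(old >> 32);
	} while (!atomic_compare_exchange_weak(&head_tid, &old,
					pack(obj, (uint32_t)old + 1)));
}

static uint32_t pop(void)		/* analogue of the alloc fastpath */
{
	uint64_t old = atomic_load(&head_tid);
	uint32_t head;

	do {
		head = (uint32_t)(old >> 32);
		if (head == NONE)
			return NONE;
	} while (!atomic_compare_exchange_weak(&head_tid, &old,
					pack(next_idx[head], (uint32_t)old + 1)));
	return head;
}

int main(void)
{
	atomic_store(&head_tid, pack(NONE, 0));
	push(3);
	push(5);
	printf("pop %u\n", pop());		/* 5: LIFO order */
	printf("pop %u\n", pop());		/* 3 */
	printf("pop %u (empty)\n", pop());	/* NONE */
	return 0;
}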
@@ -3663,6 +3772,11 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 					      const char *text)
 {
 #ifdef CONFIG_SLUB_DEBUG
+#ifdef CONFIG_PREEMPT_RT_BASE
+	/* XXX move out of irq-off section */
+	slab_err(s, page, text, s->name);
+#else
+
 	void *addr = page_address(page);
 	void *p;
 	unsigned long *map = bitmap_zalloc(page->objects, GFP_ATOMIC);
@@ -3682,8 +3796,10 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 	slab_unlock(page);
 	bitmap_free(map);
 #endif
+#endif
 }
 
+
 /*
  * Attempt to free all partial slabs on a node.
  * This is called from __kmem_cache_shutdown(). We must take list_lock
@@ -3695,7 +3811,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 	struct page *page, *h;
 
 	BUG_ON(irqs_disabled());
-	spin_lock_irq(&n->list_lock);
+	raw_spin_lock_irq(&n->list_lock);
 	list_for_each_entry_safe(page, h, &n->partial, slab_list) {
 		if (!page->inuse) {
 			remove_partial(n, page);
@@ -3705,7 +3821,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 			"Objects remaining in %s on __kmem_cache_shutdown()");
 		}
 	}
-	spin_unlock_irq(&n->list_lock);
+	raw_spin_unlock_irq(&n->list_lock);
 
 	list_for_each_entry_safe(page, h, &discard, slab_list)
 		discard_slab(s, page);
@@ -3979,7 +4095,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
 		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
 			INIT_LIST_HEAD(promote + i);
 
-		spin_lock_irqsave(&n->list_lock, flags);
+		raw_spin_lock_irqsave(&n->list_lock, flags);
 
 		/*
 		 * Build lists of slabs to discard or promote.
@@ -4010,7 +4126,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
 		for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
 			list_splice(promote + i, &n->partial);
 
-		spin_unlock_irqrestore(&n->list_lock, flags);
+		raw_spin_unlock_irqrestore(&n->list_lock, flags);
 
 		/* Release empty slabs */
 		list_for_each_entry_safe(page, t, &discard, slab_list)
@@ -4223,6 +4339,12 @@ void __init kmem_cache_init(void)
 {
 	static __initdata struct kmem_cache boot_kmem_cache,
 		boot_kmem_cache_node;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock);
+		INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list);
+	}
 
 	if (debug_guardpage_minorder())
 		slub_max_order = 0;
@@ -4424,7 +4546,7 @@ static int validate_slab_node(struct kmem_cache *s,
 	struct page *page;
 	unsigned long flags;
 
-	spin_lock_irqsave(&n->list_lock, flags);
+	raw_spin_lock_irqsave(&n->list_lock, flags);
 
 	list_for_each_entry(page, &n->partial, slab_list) {
 		validate_slab_slab(s, page, map);
@@ -4446,7 +4568,7 @@ static int validate_slab_node(struct kmem_cache *s,
 		       s->name, count, atomic_long_read(&n->nr_slabs));
 
 out:
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	return count;
 }
@@ -4632,12 +4754,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
 		if (!atomic_long_read(&n->nr_slabs))
 			continue;
 
-		spin_lock_irqsave(&n->list_lock, flags);
+		raw_spin_lock_irqsave(&n->list_lock, flags);
 		list_for_each_entry(page, &n->partial, slab_list)
 			process_slab(&t, s, page, alloc, map);
 		list_for_each_entry(page, &n->full, slab_list)
 			process_slab(&t, s, page, alloc, map);
-		spin_unlock_irqrestore(&n->list_lock, flags);
+		raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	}
 
 	for (i = 0; i < t.count; i++) {
@@ -5601,7 +5723,8 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
 		 */
 		if (buffer)
 			buf = buffer;
-		else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf))
+		else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf) &&
+			 !IS_ENABLED(CONFIG_SLUB_STATS))
 			buf = mbuf;
 		else {
 			buffer = (char *) get_zeroed_page(GFP_KERNEL);
@@ -5756,8 +5879,10 @@ static int sysfs_slab_add(struct kmem_cache *s)
 
 	s->kobj.kset = kset;
 	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
-	if (err)
+	if (err) {
+		kobject_put(&s->kobj);
 		goto out;
+	}
 
 	err = sysfs_create_group(&s->kobj, &slab_attr_group);
 	if (err)
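
The sysfs_slab_add() hunk is an error-path fix independent of the RT work: kobject_init_and_add() initializes the kobject and hands the caller a reference even when the add step fails, so bailing out without kobject_put() leaks the kobject and everything it pins. A userspace analogy of the rule, assuming only libc; struct obj, obj_init_and_add() and obj_put() are hypothetical stand-ins for the kobject API:

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;
};

static void obj_put(struct obj *o)	/* analogue of kobject_put() */
{
	if (--o->refcount == 0) {
		puts("released");
		free(o);
	}
}

/* The init half succeeds and hands the caller a reference, exactly like
 * kobject_init_and_add() initializing the kobject before the "add" step
 * fails; the reference exists either way. */
static int obj_init_and_add(struct obj *o)
{
	o->refcount = 1;
	return -1;	/* simulate registration failure */
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));
	int err = obj_init_and_add(o);

	if (err) {
		obj_put(o);	/* the fix: drop the reference on failure */
		return 1;
	}
	obj_put(o);
	return 0;
}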