Apply preempt_rt patch-4.9-rt1.patch.xz

index 2b3e740609e92e29a7b52ddb6df6b8f0b0897136..1732f9c5d31f4c79cf77780267068d5f48e5ee49 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1141,7 +1141,7 @@ static noinline int free_debug_processing(
        unsigned long uninitialized_var(flags);
        int ret = 0;
 
-       spin_lock_irqsave(&n->list_lock, flags);
+       raw_spin_lock_irqsave(&n->list_lock, flags);
        slab_lock(page);
 
        if (s->flags & SLAB_CONSISTENCY_CHECKS) {
@@ -1176,7 +1176,7 @@ out:
                         bulk_cnt, cnt);
 
        slab_unlock(page);
-       spin_unlock_irqrestore(&n->list_lock, flags);
+       raw_spin_unlock_irqrestore(&n->list_lock, flags);
        if (!ret)
                slab_fix(s, "Object at 0x%p not freed", object);
        return ret;
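
This hunk (and the many like it below) swaps spinlock_t for raw_spinlock_t on the kmem_cache_node list_lock: on PREEMPT_RT a spinlock_t becomes a sleeping rtmutex, which must not be taken from the interrupts-off, non-sleepable paths that guard the partial lists. A minimal sketch of the locking pattern, using hypothetical names (demo_lock, demo_counter) that are not part of the patch:

#include <linux/spinlock.h>

/* Sketch only, not part of the patch. raw_spinlock_t keeps true
 * spinning semantics even on PREEMPT_RT, so it stays usable in
 * contexts that may not sleep. */
static DEFINE_RAW_SPINLOCK(demo_lock);
static unsigned long demo_counter;

static void demo_update(void)
{
	unsigned long flags;

	/* disables interrupts and spins; safe in non-sleepable context */
	raw_spin_lock_irqsave(&demo_lock, flags);
	demo_counter++;		/* keep the critical section short */
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}
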
@@ -1304,6 +1304,12 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
 
 #endif /* CONFIG_SLUB_DEBUG */
 
+struct slub_free_list {
+       raw_spinlock_t          lock;
+       struct list_head        list;
+};
+static DEFINE_PER_CPU(struct slub_free_list, slub_free_list);
+
 /*
  * Hooks for other subsystems that check memory allocations. In a typical
  * production configuration these hooks all should produce no code at all.
@@ -1523,10 +1529,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
        void *start, *p;
        int idx, order;
        bool shuffle;
+       bool enableirqs = false;
 
        flags &= gfp_allowed_mask;
 
        if (gfpflags_allow_blocking(flags))
+               enableirqs = true;
+#ifdef CONFIG_PREEMPT_RT_FULL
+       if (system_state == SYSTEM_RUNNING)
+               enableirqs = true;
+#endif
+       if (enableirqs)
                local_irq_enable();
 
        flags |= s->allocflags;
@@ -1601,7 +1614,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
        page->frozen = 1;
 
 out:
-       if (gfpflags_allow_blocking(flags))
+       if (enableirqs)
                local_irq_disable();
        if (!page)
                return NULL;
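
The enableirqs logic introduced above re-enables interrupts around the page allocation not only when the gfp flags allow blocking, but also unconditionally on PREEMPT_RT_FULL once the system is fully up, since the RT page allocator takes sleeping locks and must not be entered with interrupts disabled. A standalone sketch of that decision, with a hypothetical helper name (demo_may_enable_irqs) that does not exist in the patch:

#include <linux/gfp.h>
#include <linux/kernel.h>

/* Hypothetical helper mirroring the enableirqs decision above. */
static bool demo_may_enable_irqs(gfp_t flags)
{
	/* blocking allocations always run with interrupts enabled */
	if (gfpflags_allow_blocking(flags))
		return true;
#ifdef CONFIG_PREEMPT_RT_FULL
	/* on RT the page allocator may sleep; avoid irq-off entry */
	if (system_state == SYSTEM_RUNNING)
		return true;
#endif
	return false;
}
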
@@ -1660,6 +1673,16 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
        __free_pages(page, order);
 }
 
+static void free_delayed(struct list_head *h)
+{
+       while(!list_empty(h)) {
+               struct page *page = list_first_entry(h, struct page, lru);
+
+               list_del(&page->lru);
+               __free_slab(page->slab_cache, page);
+       }
+}
+
 #define need_reserve_slab_rcu                                          \
        (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
 
@@ -1691,6 +1714,12 @@ static void free_slab(struct kmem_cache *s, struct page *page)
                }
 
                call_rcu(head, rcu_free_slab);
+       } else if (irqs_disabled()) {
+               struct slub_free_list *f = this_cpu_ptr(&slub_free_list);
+
+               raw_spin_lock(&f->lock);
+               list_add(&page->lru, &f->list);
+               raw_spin_unlock(&f->lock);
        } else
                __free_slab(s, page);
 }
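
Together with free_delayed() above, this hunk implements deferred slab freeing: when free_slab() runs with interrupts disabled, the page is parked on the current CPU's slub_free_list instead of being handed to __free_slab(), because freeing back to the page allocator can take sleeping locks on PREEMPT_RT. A sketch of that decision, wrapped in a hypothetical demo_free_or_defer() helper that is not in the patch:

#include <linux/list.h>
#include <linux/percpu.h>

/* Hypothetical wrapper sketching the defer-or-free decision above. */
static void demo_free_or_defer(struct kmem_cache *s, struct page *page)
{
	if (irqs_disabled()) {
		/* page allocator may sleep on RT: park the page for later */
		struct slub_free_list *f = this_cpu_ptr(&slub_free_list);

		raw_spin_lock(&f->lock);
		list_add(&page->lru, &f->list);
		raw_spin_unlock(&f->lock);
	} else {
		__free_slab(s, page);	/* interrupts on: free immediately */
	}
}

The parked pages are later handed to free_delayed(), always with interrupts enabled, by the allocation and flush paths changed further down.
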
@@ -1798,7 +1827,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
        if (!n || !n->nr_partial)
                return NULL;
 
-       spin_lock(&n->list_lock);
+       raw_spin_lock(&n->list_lock);
        list_for_each_entry_safe(page, page2, &n->partial, lru) {
                void *t;
 
@@ -1823,7 +1852,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
                        break;
 
        }
-       spin_unlock(&n->list_lock);
+       raw_spin_unlock(&n->list_lock);
        return object;
 }
 
@@ -2069,7 +2098,7 @@ redo:
                         * that acquire_slab() will see a slab page that
                         * is frozen
                         */
-                       spin_lock(&n->list_lock);
+                       raw_spin_lock(&n->list_lock);
                }
        } else {
                m = M_FULL;
@@ -2080,7 +2109,7 @@ redo:
                         * slabs from diagnostic functions will not see
                         * any frozen slabs.
                         */
-                       spin_lock(&n->list_lock);
+                       raw_spin_lock(&n->list_lock);
                }
        }
 
@@ -2115,7 +2144,7 @@ redo:
                goto redo;
 
        if (lock)
-               spin_unlock(&n->list_lock);
+               raw_spin_unlock(&n->list_lock);
 
        if (m == M_FREE) {
                stat(s, DEACTIVATE_EMPTY);
@@ -2147,10 +2176,10 @@ static void unfreeze_partials(struct kmem_cache *s,
                n2 = get_node(s, page_to_nid(page));
                if (n != n2) {
                        if (n)
-                               spin_unlock(&n->list_lock);
+                               raw_spin_unlock(&n->list_lock);
 
                        n = n2;
-                       spin_lock(&n->list_lock);
+                       raw_spin_lock(&n->list_lock);
                }
 
                do {
@@ -2179,7 +2208,7 @@ static void unfreeze_partials(struct kmem_cache *s,
        }
 
        if (n)
-               spin_unlock(&n->list_lock);
+               raw_spin_unlock(&n->list_lock);
 
        while (discard_page) {
                page = discard_page;
@@ -2218,14 +2247,21 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
                        pobjects = oldpage->pobjects;
                        pages = oldpage->pages;
                        if (drain && pobjects > s->cpu_partial) {
+                               struct slub_free_list *f;
                                unsigned long flags;
+                               LIST_HEAD(tofree);
                                /*
                                 * partial array is full. Move the existing
                                 * set to the per node partial list.
                                 */
                                local_irq_save(flags);
                                unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
+                               f = this_cpu_ptr(&slub_free_list);
+                               raw_spin_lock(&f->lock);
+                               list_splice_init(&f->list, &tofree);
+                               raw_spin_unlock(&f->lock);
                                local_irq_restore(flags);
+                               free_delayed(&tofree);
                                oldpage = NULL;
                                pobjects = 0;
                                pages = 0;
@@ -2297,7 +2333,22 @@ static bool has_cpu_slab(int cpu, void *info)
 
 static void flush_all(struct kmem_cache *s)
 {
+       LIST_HEAD(tofree);
+       int cpu;
+
        on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
+       for_each_online_cpu(cpu) {
+               struct slub_free_list *f;
+
+               if (!has_cpu_slab(cpu, s))
+                       continue;
+
+               f = &per_cpu(slub_free_list, cpu);
+               raw_spin_lock_irq(&f->lock);
+               list_splice_init(&f->list, &tofree);
+               raw_spin_unlock_irq(&f->lock);
+               free_delayed(&tofree);
+       }
 }
 
 /*
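
flush_all() now also drains every online CPU's slub_free_list after flushing the cpu slabs. Because it touches remote CPUs' lists rather than only the local one, it takes each per-CPU lock with the _irq variant and relies on the raw spinlock for cross-CPU serialization. A reduced sketch of that drain loop, using a hypothetical demo_drain_all_cpus() helper (the patch itself folds this into flush_all() and filters with has_cpu_slab()):

#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/percpu.h>

/* Hypothetical, condensed version of the drain loop added above. */
static void demo_drain_all_cpus(void)
{
	LIST_HEAD(tofree);
	int cpu;

	for_each_online_cpu(cpu) {
		struct slub_free_list *f = &per_cpu(slub_free_list, cpu);

		/* _irq variant: this path runs with interrupts enabled */
		raw_spin_lock_irq(&f->lock);
		list_splice_init(&f->list, &tofree);
		raw_spin_unlock_irq(&f->lock);
	}
	/* all parked pages are freed outside any lock */
	free_delayed(&tofree);
}
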
@@ -2352,10 +2403,10 @@ static unsigned long count_partial(struct kmem_cache_node *n,
        unsigned long x = 0;
        struct page *page;
 
-       spin_lock_irqsave(&n->list_lock, flags);
+       raw_spin_lock_irqsave(&n->list_lock, flags);
        list_for_each_entry(page, &n->partial, lru)
                x += get_count(page);
-       spin_unlock_irqrestore(&n->list_lock, flags);
+       raw_spin_unlock_irqrestore(&n->list_lock, flags);
        return x;
 }
 #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
@@ -2493,8 +2544,10 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
  * already disabled (which is the case for bulk allocation).
  */
 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
-                         unsigned long addr, struct kmem_cache_cpu *c)
+                         unsigned long addr, struct kmem_cache_cpu *c,
+                         struct list_head *to_free)
 {
+       struct slub_free_list *f;
        void *freelist;
        struct page *page;
 
@@ -2554,6 +2607,13 @@ load_freelist:
        VM_BUG_ON(!c->page->frozen);
        c->freelist = get_freepointer(s, freelist);
        c->tid = next_tid(c->tid);
+
+out:
+       f = this_cpu_ptr(&slub_free_list);
+       raw_spin_lock(&f->lock);
+       list_splice_init(&f->list, to_free);
+       raw_spin_unlock(&f->lock);
+
        return freelist;
 
 new_slab:
@@ -2585,7 +2645,7 @@ new_slab:
        deactivate_slab(s, page, get_freepointer(s, freelist));
        c->page = NULL;
        c->freelist = NULL;
-       return freelist;
+       goto out;
 }
 
 /*
@@ -2597,6 +2657,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 {
        void *p;
        unsigned long flags;
+       LIST_HEAD(tofree);
 
        local_irq_save(flags);
 #ifdef CONFIG_PREEMPT
@@ -2608,8 +2669,9 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
        c = this_cpu_ptr(s->cpu_slab);
 #endif
 
-       p = ___slab_alloc(s, gfpflags, node, addr, c);
+       p = ___slab_alloc(s, gfpflags, node, addr, c, &tofree);
        local_irq_restore(flags);
+       free_delayed(&tofree);
        return p;
 }
 
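___slab_alloc() now splices the current CPU's parked pages onto the caller-supplied to_free list while interrupts are still off, and the caller (__slab_alloc() here, kmem_cache_alloc_bulk() below) does the actual freeing only after local_irq_restore(). A condensed sketch of that caller-side pattern, with a hypothetical demo_alloc_wrapper() standing in for __slab_alloc():

#include <linux/irqflags.h>
#include <linux/list.h>

/* Hypothetical caller sketch: collect under irq-off, free afterwards. */
static void *demo_alloc_wrapper(struct kmem_cache *s, gfp_t gfpflags,
				int node, unsigned long addr,
				struct kmem_cache_cpu *c)
{
	LIST_HEAD(tofree);
	unsigned long flags;
	void *p;

	local_irq_save(flags);
	/* ___slab_alloc() moves the per-CPU parked pages onto tofree */
	p = ___slab_alloc(s, gfpflags, node, addr, c, &tofree);
	local_irq_restore(flags);

	/* interrupts are on again: now it is safe to free the pages */
	free_delayed(&tofree);
	return p;
}
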
@@ -2795,7 +2857,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
        do {
                if (unlikely(n)) {
-                       spin_unlock_irqrestore(&n->list_lock, flags);
+                       raw_spin_unlock_irqrestore(&n->list_lock, flags);
                        n = NULL;
                }
                prior = page->freelist;
@@ -2827,7 +2889,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                                 * Otherwise the list_lock will synchronize with
                                 * other processors updating the list of slabs.
                                 */
-                               spin_lock_irqsave(&n->list_lock, flags);
+                               raw_spin_lock_irqsave(&n->list_lock, flags);
 
                        }
                }
@@ -2869,7 +2931,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                add_partial(n, page, DEACTIVATE_TO_TAIL);
                stat(s, FREE_ADD_PARTIAL);
        }
-       spin_unlock_irqrestore(&n->list_lock, flags);
+       raw_spin_unlock_irqrestore(&n->list_lock, flags);
        return;
 
 slab_empty:
@@ -2884,7 +2946,7 @@ slab_empty:
                remove_full(s, n, page);
        }
 
-       spin_unlock_irqrestore(&n->list_lock, flags);
+       raw_spin_unlock_irqrestore(&n->list_lock, flags);
        stat(s, FREE_SLAB);
        discard_slab(s, page);
 }
@@ -3089,6 +3151,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
                          void **p)
 {
        struct kmem_cache_cpu *c;
+       LIST_HEAD(to_free);
        int i;
 
        /* memcg and kmem_cache debug support */
@@ -3112,7 +3175,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
                         * of re-populating per CPU c->freelist
                         */
                        p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
-                                           _RET_IP_, c);
+                                           _RET_IP_, c, &to_free);
                        if (unlikely(!p[i]))
                                goto error;
 
@@ -3124,6 +3187,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
        }
        c->tid = next_tid(c->tid);
        local_irq_enable();
+       free_delayed(&to_free);
 
        /* Clear memory outside IRQ disabled fastpath loop */
        if (unlikely(flags & __GFP_ZERO)) {
@@ -3271,7 +3335,7 @@ static void
 init_kmem_cache_node(struct kmem_cache_node *n)
 {
        n->nr_partial = 0;
-       spin_lock_init(&n->list_lock);
+       raw_spin_lock_init(&n->list_lock);
        INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
        atomic_long_set(&n->nr_slabs, 0);
@@ -3615,6 +3679,10 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
                                                        const char *text)
 {
 #ifdef CONFIG_SLUB_DEBUG
+#ifdef CONFIG_PREEMPT_RT_BASE
+       /* XXX move out of irq-off section */
+       slab_err(s, page, text, s->name);
+#else
        void *addr = page_address(page);
        void *p;
        unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
@@ -3635,6 +3703,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
        slab_unlock(page);
        kfree(map);
 #endif
+#endif
 }
 
 /*
@@ -3648,7 +3717,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
        struct page *page, *h;
 
        BUG_ON(irqs_disabled());
-       spin_lock_irq(&n->list_lock);
+       raw_spin_lock_irq(&n->list_lock);
        list_for_each_entry_safe(page, h, &n->partial, lru) {
                if (!page->inuse) {
                        remove_partial(n, page);
@@ -3658,7 +3727,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
                        "Objects remaining in %s on __kmem_cache_shutdown()");
                }
        }
-       spin_unlock_irq(&n->list_lock);
+       raw_spin_unlock_irq(&n->list_lock);
 
        list_for_each_entry_safe(page, h, &discard, lru)
                discard_slab(s, page);
@@ -3916,7 +3985,7 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
                for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
                        INIT_LIST_HEAD(promote + i);
 
-               spin_lock_irqsave(&n->list_lock, flags);
+               raw_spin_lock_irqsave(&n->list_lock, flags);
 
                /*
                 * Build lists of slabs to discard or promote.
@@ -3947,7 +4016,7 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
                for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
                        list_splice(promote + i, &n->partial);
 
-               spin_unlock_irqrestore(&n->list_lock, flags);
+               raw_spin_unlock_irqrestore(&n->list_lock, flags);
 
                /* Release empty slabs */
                list_for_each_entry_safe(page, t, &discard, lru)
@@ -4123,6 +4192,12 @@ void __init kmem_cache_init(void)
 {
        static __initdata struct kmem_cache boot_kmem_cache,
                boot_kmem_cache_node;
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock);
+               INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list);
+       }
 
        if (debug_guardpage_minorder())
                slub_max_order = 0;
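
The boot-time loop above initializes the lock and list head of every possible CPU's slub_free_list before any slab allocation can defer a page; iterating over possible rather than online CPUs means CPUs hotplugged in later already find their list set up. A minimal sketch of that per-CPU initialization pattern, with hypothetical names (demo_pcpu, demo_pcpu_init) not taken from the patch:

#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

/* Sketch only: a per-CPU lock + list pair like slub_free_list. */
struct demo_pcpu {
	raw_spinlock_t		lock;
	struct list_head	list;
};
static DEFINE_PER_CPU(struct demo_pcpu, demo_pcpu);

static void __init demo_pcpu_init(void)
{
	int cpu;

	/* cover possible (not just online) CPUs: ready before hotplug */
	for_each_possible_cpu(cpu) {
		raw_spin_lock_init(&per_cpu(demo_pcpu, cpu).lock);
		INIT_LIST_HEAD(&per_cpu(demo_pcpu, cpu).list);
	}
}
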
@@ -4331,7 +4406,7 @@ static int validate_slab_node(struct kmem_cache *s,
        struct page *page;
        unsigned long flags;
 
-       spin_lock_irqsave(&n->list_lock, flags);
+       raw_spin_lock_irqsave(&n->list_lock, flags);
 
        list_for_each_entry(page, &n->partial, lru) {
                validate_slab_slab(s, page, map);
@@ -4353,7 +4428,7 @@ static int validate_slab_node(struct kmem_cache *s,
                       s->name, count, atomic_long_read(&n->nr_slabs));
 
 out:
-       spin_unlock_irqrestore(&n->list_lock, flags);
+       raw_spin_unlock_irqrestore(&n->list_lock, flags);
        return count;
 }
 
@@ -4541,12 +4616,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
                if (!atomic_long_read(&n->nr_slabs))
                        continue;
 
-               spin_lock_irqsave(&n->list_lock, flags);
+               raw_spin_lock_irqsave(&n->list_lock, flags);
                list_for_each_entry(page, &n->partial, lru)
                        process_slab(&t, s, page, alloc, map);
                list_for_each_entry(page, &n->full, lru)
                        process_slab(&t, s, page, alloc, map);
-               spin_unlock_irqrestore(&n->list_lock, flags);
+               raw_spin_unlock_irqrestore(&n->list_lock, flags);
        }
 
        for (i = 0; i < t.count; i++) {