Apply preempt_rt patch-4.9-rt1.patch.xz
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 63f199256e6515edc627c41cb568c2e361655289..c541c1cfb04ec90317d6bca673b35e38a53da861 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -61,6 +61,7 @@
 #include <linux/page_ext.h>
 #include <linux/hugetlb.h>
 #include <linux/sched/rt.h>
+#include <linux/locallock.h>
 #include <linux/page_owner.h>
 #include <linux/kthread.h>
 #include <linux/memcontrol.h>
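The new <linux/locallock.h> include provides the local-lock primitives the rest of this patch builds on: on a non-RT configuration local_lock_irqsave() falls back to plain local_irq_save() (as the cpu_lock_irqsave() fallbacks in the next hunk illustrate), while on PREEMPT_RT it acquires a per-CPU sleeping lock, so the protected section stays preemptible. A minimal user-space sketch of that substitution, not the kernel's implementation (MODEL_RT and every name below are invented):

    #include <pthread.h>

    #define NR_CPUS 4

    /* One lock per CPU; in the kernel this lives in per-CPU storage. */
    static pthread_mutex_t cpu_lock[NR_CPUS] = {
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    };

    #ifdef MODEL_RT
    /* RT: a real (sleeping) lock; the section stays preemptible. */
    static void local_lock(int cpu)   { pthread_mutex_lock(&cpu_lock[cpu]); }
    static void local_unlock(int cpu) { pthread_mutex_unlock(&cpu_lock[cpu]); }
    #else
    /* non-RT: the kernel disables interrupts instead; user space has
     * no equivalent, so the model degenerates to a no-op. */
    static void local_lock(int cpu)   { (void)cpu; }
    static void local_unlock(int cpu) { (void)cpu; }
    #endif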
@@ -281,6 +282,18 @@ EXPORT_SYMBOL(nr_node_ids);
 EXPORT_SYMBOL(nr_online_nodes);
 #endif
 
+static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+# define cpu_lock_irqsave(cpu, flags)          \
+       local_lock_irqsave_on(pa_lock, flags, cpu)
+# define cpu_unlock_irqrestore(cpu, flags)     \
+       local_unlock_irqrestore_on(pa_lock, flags, cpu)
+#else
+# define cpu_lock_irqsave(cpu, flags)          local_irq_save(flags)
+# define cpu_unlock_irqrestore(cpu, flags)     local_irq_restore(flags)
+#endif
+
 int page_group_by_mobility_disabled __read_mostly;
 
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
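The cpu_lock_irqsave() wrappers cover a capability only the RT side has: local_lock_irqsave_on() lets one CPU take another CPU's local lock, so remote per-CPU state can be reached without an IPI (drain_pages_zone() and drain_all_pages() below depend on this). Continuing the user-space sketch above (again invented names, not kernel code):

    static int pcp_count[NR_CPUS];  /* stand-in for pcp->count */

    /* RT model: any thread may drain any CPU's private state by
     * taking that CPU's lock, i.e. local_lock_irqsave_on(). */
    static void drain_cpu(int cpu)
    {
            local_lock(cpu);
            pcp_count[cpu] = 0;     /* "drain" the remote pcp list */
            local_unlock(cpu);
    }

On non-RT the wrappers collapse to local_irq_save()/local_irq_restore(), which only ever protect the current CPU; that asymmetry is what forces the IPI path in drain_all_pages() further down.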
@@ -1072,7 +1085,7 @@ static bool bulkfree_pcp_prepare(struct page *page)
 #endif /* CONFIG_DEBUG_VM */
 
 /*
- * Frees a number of pages from the PCP lists
+ * Frees a number of pages which have been collected from the pcp lists.
  * Assumes all pages on list are in same zone, and of same order.
  * count is the number of pages to free.
  *
@@ -1083,19 +1096,58 @@ static bool bulkfree_pcp_prepare(struct page *page)
  * pinned" detection logic.
  */
 static void free_pcppages_bulk(struct zone *zone, int count,
-                                       struct per_cpu_pages *pcp)
+                              struct list_head *list)
 {
-       int migratetype = 0;
-       int batch_free = 0;
        unsigned long nr_scanned;
        bool isolated_pageblocks;
+       unsigned long flags;
+
+       spin_lock_irqsave(&zone->lock, flags);
 
-       spin_lock(&zone->lock);
        isolated_pageblocks = has_isolate_pageblock(zone);
        nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
        if (nr_scanned)
                __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
 
+       while (!list_empty(list)) {
+               struct page *page;
+               int mt; /* migratetype of the to-be-freed page */
+
+               page = list_first_entry(list, struct page, lru);
+               /* must delete as __free_one_page list manipulates */
+               list_del(&page->lru);
+
+               mt = get_pcppage_migratetype(page);
+               /* MIGRATE_ISOLATE page should not go to pcplists */
+               VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
+               /* Pageblock could have been isolated meanwhile */
+               if (unlikely(isolated_pageblocks))
+                       mt = get_pageblock_migratetype(page);
+
+               if (bulkfree_pcp_prepare(page))
+                       continue;
+
+               __free_one_page(page, page_to_pfn(page), zone, 0, mt);
+               trace_mm_page_pcpu_drain(page, 0, mt);
+               count--;
+       }
+       WARN_ON(count != 0);
+       spin_unlock_irqrestore(&zone->lock, flags);
+}
+
+/*
+ * Moves a number of pages from the PCP lists to a private list,
+ * which is then freed outside of the locked region.
+ *
+ * Assumes all pages on list are in same zone, and of same order.
+ * count is the number of pages to free.
+ */
+static void isolate_pcp_pages(int count, struct per_cpu_pages *src,
+                             struct list_head *dst)
+{
+       int migratetype = 0;
+       int batch_free = 0;
+
        while (count) {
                struct page *page;
                struct list_head *list;
@@ -1111,7 +1163,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
                        batch_free++;
                        if (++migratetype == MIGRATE_PCPTYPES)
                                migratetype = 0;
-                       list = &pcp->lists[migratetype];
+                       list = &src->lists[migratetype];
                } while (list_empty(list));
 
                /* This is the only non-empty list. Free them all. */
@@ -1119,27 +1171,12 @@ static void free_pcppages_bulk(struct zone *zone, int count,
                        batch_free = count;
 
                do {
-                       int mt; /* migratetype of the to-be-freed page */
-
                        page = list_last_entry(list, struct page, lru);
-                       /* must delete as __free_one_page list manipulates */
                        list_del(&page->lru);
 
-                       mt = get_pcppage_migratetype(page);
-                       /* MIGRATE_ISOLATE page should not go to pcplists */
-                       VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
-                       /* Pageblock could have been isolated meanwhile */
-                       if (unlikely(isolated_pageblocks))
-                               mt = get_pageblock_migratetype(page);
-
-                       if (bulkfree_pcp_prepare(page))
-                               continue;
-
-                       __free_one_page(page, page_to_pfn(page), zone, 0, mt);
-                       trace_mm_page_pcpu_drain(page, 0, mt);
+                       list_add(&page->lru, dst);
                } while (--count && --batch_free && !list_empty(list));
        }
-       spin_unlock(&zone->lock);
 }
 
 static void free_one_page(struct zone *zone,
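This is the core restructuring: the old free_pcppages_bulk() walked the pcp lists and fed the buddy allocator in one critical section, with interrupts off and zone->lock held. The new code splits it, so isolate_pcp_pages() detaches up to count pages onto a caller-supplied private list while the pcp state is protected, and free_pcppages_bulk() then returns them to the buddy lists under zone->lock alone. A self-contained user-space model of the two-phase pattern (pthread mutexes standing in for the kernel locks; all names invented):

    #include <pthread.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    static pthread_mutex_t pcp_lock  = PTHREAD_MUTEX_INITIALIZER; /* ~ pa_lock */
    static pthread_mutex_t zone_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ zone->lock */
    static struct node *pcp_list;

    static void drain(int count)
    {
            struct node *dst = NULL;

            /* Phase 1 (~ isolate_pcp_pages): detach onto a private
             * list while the pcp state is protected; cheap. */
            pthread_mutex_lock(&pcp_lock);
            while (count-- > 0 && pcp_list) {
                    struct node *n = pcp_list;

                    pcp_list = n->next;
                    n->next = dst;
                    dst = n;
            }
            pthread_mutex_unlock(&pcp_lock);

            /* Phase 2 (~ free_pcppages_bulk): the expensive part runs
             * under the other lock only. */
            pthread_mutex_lock(&zone_lock);
            while (dst) {
                    struct node *n = dst;

                    dst = n->next;
                    free(n);
            }
            pthread_mutex_unlock(&zone_lock);
    }

    int main(void)
    {
            for (int i = 0; i < 8; i++) {   /* fill the "pcp list" */
                    struct node *n = malloc(sizeof(*n));

                    if (!n)
                            return 1;
                    n->next = pcp_list;
                    pcp_list = n;
            }
            drain(8);
            return 0;
    }

Because dst is private to the caller, phase 2 needs no coordination with the pcp side at all, which is what lets the call sites below drop pa_lock before the bulk free.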
@@ -1148,7 +1185,9 @@ static void free_one_page(struct zone *zone,
                                int migratetype)
 {
        unsigned long nr_scanned;
-       spin_lock(&zone->lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&zone->lock, flags);
        nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
        if (nr_scanned)
                __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
@@ -1158,7 +1197,7 @@ static void free_one_page(struct zone *zone,
                migratetype = get_pfnblock_migratetype(page, pfn);
        }
        __free_one_page(page, pfn, zone, order, migratetype);
-       spin_unlock(&zone->lock);
+       spin_unlock_irqrestore(&zone->lock, flags);
 }
 
 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
@@ -1244,10 +1283,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
                return;
 
        migratetype = get_pfnblock_migratetype(page, pfn);
-       local_irq_save(flags);
+       local_lock_irqsave(pa_lock, flags);
        __count_vm_events(PGFREE, 1 << order);
        free_one_page(page_zone(page), page, pfn, order, migratetype);
-       local_irq_restore(flags);
+       local_unlock_irqrestore(pa_lock, flags);
 }
 
 static void __init __free_pages_boot_core(struct page *page, unsigned int order)
@@ -2238,16 +2277,18 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 {
        unsigned long flags;
+       LIST_HEAD(dst);
        int to_drain, batch;
 
-       local_irq_save(flags);
+       local_lock_irqsave(pa_lock, flags);
        batch = READ_ONCE(pcp->batch);
        to_drain = min(pcp->count, batch);
        if (to_drain > 0) {
-               free_pcppages_bulk(zone, to_drain, pcp);
+               isolate_pcp_pages(to_drain, pcp, &dst);
                pcp->count -= to_drain;
        }
-       local_irq_restore(flags);
+       local_unlock_irqrestore(pa_lock, flags);
+       free_pcppages_bulk(zone, to_drain, &dst);
 }
 #endif
 
@@ -2263,16 +2304,21 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
        unsigned long flags;
        struct per_cpu_pageset *pset;
        struct per_cpu_pages *pcp;
+       LIST_HEAD(dst);
+       int count;
 
-       local_irq_save(flags);
+       cpu_lock_irqsave(cpu, flags);
        pset = per_cpu_ptr(zone->pageset, cpu);
 
        pcp = &pset->pcp;
-       if (pcp->count) {
-               free_pcppages_bulk(zone, pcp->count, pcp);
+       count = pcp->count;
+       if (count) {
+               isolate_pcp_pages(count, pcp, &dst);
                pcp->count = 0;
        }
-       local_irq_restore(flags);
+       cpu_unlock_irqrestore(cpu, flags);
+       if (count)
+               free_pcppages_bulk(zone, count, &dst);
 }
 
 /*
@@ -2358,8 +2404,17 @@ void drain_all_pages(struct zone *zone)
                else
                        cpumask_clear_cpu(cpu, &cpus_with_pcps);
        }
+#ifndef CONFIG_PREEMPT_RT_BASE
        on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages,
                                                                zone, 1);
+#else
+       for_each_cpu(cpu, &cpus_with_pcps) {
+               if (zone)
+                       drain_pages_zone(cpu, zone);
+               else
+                       drain_pages(cpu);
+       }
+#endif
 }
 
 #ifdef CONFIG_HIBERNATION
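The two branches here reflect the same asymmetry as the cpu_lock_irqsave() wrappers. On non-RT the pcp lists are only safe to touch with interrupts disabled on their own CPU, so drain_local_pages() has to run on each target CPU via IPI (on_each_cpu_mask()). On RT that would mean taking sleeping locks from hard-interrupt context; it is also unnecessary, since the remote-lockable pa_lock lets the calling CPU iterate the mask and drain everyone directly. In the user-space sketch above, the RT branch is simply:

    /* RT model: one context drains every CPU directly, no IPIs. */
    static void drain_all(void)
    {
            int cpu;

            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    drain_cpu(cpu);
    }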
@@ -2419,7 +2474,7 @@ void free_hot_cold_page(struct page *page, bool cold)
 
        migratetype = get_pfnblock_migratetype(page, pfn);
        set_pcppage_migratetype(page, migratetype);
-       local_irq_save(flags);
+       local_lock_irqsave(pa_lock, flags);
        __count_vm_event(PGFREE);
 
        /*
@@ -2445,12 +2500,17 @@ void free_hot_cold_page(struct page *page, bool cold)
        pcp->count++;
        if (pcp->count >= pcp->high) {
                unsigned long batch = READ_ONCE(pcp->batch);
-               free_pcppages_bulk(zone, batch, pcp);
+               LIST_HEAD(dst);
+
+               isolate_pcp_pages(batch, pcp, &dst);
                pcp->count -= batch;
+               local_unlock_irqrestore(pa_lock, flags);
+               free_pcppages_bulk(zone, batch, &dst);
+               return;
        }
 
 out:
-       local_irq_restore(flags);
+       local_unlock_irqrestore(pa_lock, flags);
 }
 
 /*
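In free_hot_cold_page(), the overflow case now detaches a batch, drops the local lock, and only then does the bulk free, returning early so the common unlock at out: is not reached twice. A compilable user-space rendition of that control flow (invented names; heap-allocated list nodes stand in for the pcp pages):

    #include <pthread.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER; /* ~ pa_lock */
    static struct node *list;
    static int count;
    enum { HIGH = 8, BATCH = 4 };

    static void slow_free(struct node *n)  /* ~ free_pcppages_bulk() */
    {
            while (n) {
                    struct node *next = n->next;

                    free(n);
                    n = next;
            }
    }

    static void fast_free(struct node *n)  /* ~ free_hot_cold_page() */
    {
            pthread_mutex_lock(&lk);
            n->next = list;
            list = n;
            if (++count >= HIGH) {
                    struct node *dst = NULL;
                    int i;

                    for (i = 0; i < BATCH && list; i++) {
                            struct node *t = list;

                            list = t->next;
                            t->next = dst;
                            dst = t;
                    }
                    count -= BATCH;
                    /* Drop the lock BEFORE the expensive part... */
                    pthread_mutex_unlock(&lk);
                    slow_free(dst);
                    return;     /* ...and bypass the common unlock. */
            }
            pthread_mutex_unlock(&lk);
    }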
@@ -2592,7 +2652,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
                struct per_cpu_pages *pcp;
                struct list_head *list;
 
-               local_irq_save(flags);
+               local_lock_irqsave(pa_lock, flags);
                do {
                        pcp = &this_cpu_ptr(zone->pageset)->pcp;
                        list = &pcp->lists[migratetype];
@@ -2619,7 +2679,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
                 * allocate greater than order-1 page units with __GFP_NOFAIL.
                 */
                WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
-               spin_lock_irqsave(&zone->lock, flags);
+               local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
 
                do {
                        page = NULL;
@@ -2631,22 +2691,24 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
                        if (!page)
                                page = __rmqueue(zone, order, migratetype);
                } while (page && check_new_pages(page, order));
-               spin_unlock(&zone->lock);
-               if (!page)
+               if (!page) {
+                       spin_unlock(&zone->lock);
                        goto failed;
+               }
                __mod_zone_freepage_state(zone, -(1 << order),
                                          get_pcppage_migratetype(page));
+               spin_unlock(&zone->lock);
        }
 
        __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
        zone_statistics(preferred_zone, zone, gfp_flags);
-       local_irq_restore(flags);
+       local_unlock_irqrestore(pa_lock, flags);
 
        VM_BUG_ON_PAGE(bad_range(zone, page), page);
        return page;
 
 failed:
-       local_irq_restore(flags);
+       local_unlock_irqrestore(pa_lock, flags);
        return NULL;
 }
 
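Two details in the order > 0 path are worth noting. First, local_spin_lock_irqsave(pa_lock, &zone->lock, flags) is, as I read locallock.h, plain spin_lock_irqsave(&zone->lock, flags) on non-RT, while on RT it takes pa_lock first and then zone->lock. Second, spin_unlock() moves below __mod_zone_freepage_state(): with interrupts no longer disabled at this point on RT, the freepage accounting is kept under zone->lock instead. A user-space sketch of the resulting lock order (invented names; malloc() stands in for __rmqueue()):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t pa_lock_m = PTHREAD_MUTEX_INITIALIZER; /* ~ pa_lock */
    static pthread_mutex_t zone_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ zone->lock */
    static long nr_free = 1024;

    static void *alloc_order0(void)
    {
            void *page;

            /* ~ local_spin_lock_irqsave(): local half first,
             * then the zone spinlock. */
            pthread_mutex_lock(&pa_lock_m);
            pthread_mutex_lock(&zone_lock);

            page = malloc(4096);            /* ~ __rmqueue() */
            if (!page) {
                    pthread_mutex_unlock(&zone_lock);
                    goto out;
            }
            nr_free--;      /* accounting still under zone_lock */
            pthread_mutex_unlock(&zone_lock);

            /* per-CPU statistics would still run under pa_lock here */
    out:
            pthread_mutex_unlock(&pa_lock_m);
            return page;
    }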
@@ -6497,7 +6559,9 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
        int cpu = (unsigned long)hcpu;
 
        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
+               local_lock_irq_on(swapvec_lock, cpu);
                lru_add_drain_cpu(cpu);
+               local_unlock_irq_on(swapvec_lock, cpu);
                drain_pages(cpu);
 
                /*
@@ -6523,6 +6587,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
 void __init page_alloc_init(void)
 {
        hotcpu_notifier(page_alloc_cpu_notify, 0);
+       local_irq_lock_init(pa_lock);
 }
 
 /*
@@ -7351,7 +7416,7 @@ void zone_pcp_reset(struct zone *zone)
        struct per_cpu_pageset *pset;
 
        /* avoid races with drain_pages()  */
-       local_irq_save(flags);
+       local_lock_irqsave(pa_lock, flags);
        if (zone->pageset != &boot_pageset) {
                for_each_online_cpu(cpu) {
                        pset = per_cpu_ptr(zone->pageset, cpu);
@@ -7360,7 +7425,7 @@ void zone_pcp_reset(struct zone *zone)
                free_percpu(zone->pageset);
                zone->pageset = &boot_pageset;
        }
-       local_irq_restore(flags);
+       local_unlock_irqrestore(pa_lock, flags);
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE