mm/memory_hotplug.c (at commit "memory-hotplug: allocate zone's pcp before onlining pages")
/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>

#include <asm/tlbflush.h>

#include "internal.h"
/*
 * online_page_callback contains a pointer to the current page-onlining
 * function.  Initially it is generic_online_page().  If required, it can be
 * changed by calling set_online_page_callback() to register a new callback
 * and restore_online_page_callback() to restore the generic callback.
 */

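/*
 * Illustrative sketch (not code from this file): a driver such as a balloon
 * driver can divert hot-added pages into its own pool instead of handing
 * them to the buddy allocator by registering a callback built from the
 * exported __online_page_*() helpers below.  The name my_online_page is
 * hypothetical and used only for illustration:
 *
 *      static void my_online_page(struct page *page)
 *      {
 *              __online_page_set_limits(page);
 *              ... keep the page for the driver instead of freeing it ...
 *      }
 *
 *      rc = set_online_page_callback(&my_online_page);
 *      ...
 *      rc = restore_online_page_callback(&my_online_page);
 */
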
static void generic_online_page(struct page *page);

static online_page_callback_t online_page_callback = generic_online_page;

DEFINE_MUTEX(mem_hotplug_mutex);

void lock_memory_hotplug(void)
{
        mutex_lock(&mem_hotplug_mutex);

        /* for exclusive hibernation if CONFIG_HIBERNATION=y */
        lock_system_sleep();
}

void unlock_memory_hotplug(void)
{
        unlock_system_sleep();
        mutex_unlock(&mem_hotplug_mutex);
}


/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
        struct resource *res;
        res = kzalloc(sizeof(struct resource), GFP_KERNEL);
        BUG_ON(!res);

        res->name = "System RAM";
        res->start = start;
        res->end = start + size - 1;
        res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
        if (request_resource(&iomem_resource, res) < 0) {
                printk("System RAM resource %pR cannot be added\n", res);
                kfree(res);
                res = NULL;
        }
        return res;
}

static void release_memory_resource(struct resource *res)
{
        if (!res)
                return;
        release_resource(res);
        kfree(res);
        return;
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void get_page_bootmem(unsigned long info,  struct page *page,
                             unsigned long type)
{
        page->lru.next = (struct list_head *) type;
        SetPagePrivate(page);
        set_page_private(page, info);
        atomic_inc(&page->_count);
}

/* The reference to __meminit __free_pages_bootmem is valid,
 * so use __ref to tell modpost not to generate a warning. */
void __ref put_page_bootmem(struct page *page)
{
        unsigned long type;

        type = (unsigned long) page->lru.next;
        BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
               type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

        if (atomic_dec_return(&page->_count) == 1) {
                ClearPagePrivate(page);
                set_page_private(page, 0);
                INIT_LIST_HEAD(&page->lru);
                __free_pages_bootmem(page, 0);
        }

}

static void register_page_bootmem_info_section(unsigned long start_pfn)
{
        unsigned long *usemap, mapsize, section_nr, i;
        struct mem_section *ms;
        struct page *page, *memmap;

        section_nr = pfn_to_section_nr(start_pfn);
        ms = __nr_to_section(section_nr);

        /* Get section's memmap address */
        memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

        /*
         * Get page for the memmap's phys address
         * XXX: need more consideration for sparse_vmemmap...
         */
        page = virt_to_page(memmap);
        mapsize = sizeof(struct page) * PAGES_PER_SECTION;
        mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

        /* remember memmap's page */
        for (i = 0; i < mapsize; i++, page++)
                get_page_bootmem(section_nr, page, SECTION_INFO);

        usemap = __nr_to_section(section_nr)->pageblock_flags;
        page = virt_to_page(usemap);

        mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

        for (i = 0; i < mapsize; i++, page++)
                get_page_bootmem(section_nr, page, MIX_SECTION_INFO);

}

void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
        unsigned long i, pfn, end_pfn, nr_pages;
        int node = pgdat->node_id;
        struct page *page;
        struct zone *zone;

        nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
        page = virt_to_page(pgdat);

        for (i = 0; i < nr_pages; i++, page++)
                get_page_bootmem(node, page, NODE_INFO);

        zone = &pgdat->node_zones[0];
        for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
                if (zone->wait_table) {
                        nr_pages = zone->wait_table_hash_nr_entries
                                * sizeof(wait_queue_head_t);
                        nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
                        page = virt_to_page(zone->wait_table);

                        for (i = 0; i < nr_pages; i++, page++)
                                get_page_bootmem(node, page, NODE_INFO);
                }
        }

        pfn = pgdat->node_start_pfn;
        end_pfn = pfn + pgdat->node_spanned_pages;

        /* register section info */
        for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                /*
                 * Some platforms can assign the same pfn to multiple nodes - on
                 * node0 as well as nodeN.  To avoid registering a pfn against
                 * multiple nodes we check that this pfn does not already
                 * reside in some other node.
                 */
                if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node))
                        register_page_bootmem_info_section(pfn);
        }
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

static void grow_zone_span(struct zone *zone, unsigned long start_pfn,
                           unsigned long end_pfn)
{
        unsigned long old_zone_end_pfn;

        zone_span_writelock(zone);

        old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
        if (start_pfn < zone->zone_start_pfn)
                zone->zone_start_pfn = start_pfn;

        zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
                                zone->zone_start_pfn;

        zone_span_writeunlock(zone);
}

static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
                            unsigned long end_pfn)
{
        unsigned long old_pgdat_end_pfn =
                pgdat->node_start_pfn + pgdat->node_spanned_pages;

        if (start_pfn < pgdat->node_start_pfn)
                pgdat->node_start_pfn = start_pfn;

        pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
                                        pgdat->node_start_pfn;
}

static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
        struct pglist_data *pgdat = zone->zone_pgdat;
        int nr_pages = PAGES_PER_SECTION;
        int nid = pgdat->node_id;
        int zone_type;
        unsigned long flags;

        zone_type = zone - pgdat->node_zones;
        if (!zone->wait_table) {
                int ret;

                ret = init_currently_empty_zone(zone, phys_start_pfn,
                                                nr_pages, MEMMAP_HOTPLUG);
                if (ret)
                        return ret;
        }
        pgdat_resize_lock(zone->zone_pgdat, &flags);
        grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
        grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
                        phys_start_pfn + nr_pages);
        pgdat_resize_unlock(zone->zone_pgdat, &flags);
        memmap_init_zone(nr_pages, nid, zone_type,
                         phys_start_pfn, MEMMAP_HOTPLUG);
        return 0;
}

static int __meminit __add_section(int nid, struct zone *zone,
                                        unsigned long phys_start_pfn)
{
        int nr_pages = PAGES_PER_SECTION;
        int ret;

        if (pfn_valid(phys_start_pfn))
                return -EEXIST;

        ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);

        if (ret < 0)
                return ret;

        ret = __add_zone(zone, phys_start_pfn);

        if (ret < 0)
                return ret;

        return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
        /*
         * XXX: Freeing the memmap with vmemmap is not implemented yet.
         *      This should be removed later.
         */
        return -EBUSY;
}
#else
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
        unsigned long flags;
        struct pglist_data *pgdat = zone->zone_pgdat;
        int ret = -EINVAL;

        if (!valid_section(ms))
                return ret;

        ret = unregister_memory_section(ms);
        if (ret)
                return ret;

        pgdat_resize_lock(pgdat, &flags);
        sparse_remove_one_section(zone, ms);
        pgdat_resize_unlock(pgdat, &flags);
        return 0;
}
#endif

/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
                        unsigned long nr_pages)
{
        unsigned long i;
        int err = 0;
        int start_sec, end_sec;
        /* align the hot-added range to section boundaries while initializing the mem_map */
        start_sec = pfn_to_section_nr(phys_start_pfn);
        end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

        for (i = start_sec; i <= end_sec; i++) {
                err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);

                /*
                 * -EEXIST is finally dealt with by the ioresource collision
                 * check; see add_memory() => register_memory_resource().
                 * A warning is printed if there is a collision.
                 */
                if (err && (err != -EEXIST))
                        break;
                err = 0;
        }

        return err;
}
EXPORT_SYMBOL_GPL(__add_pages);

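/*
 * Illustrative sketch (an assumption about a typical caller, not code from
 * this file): an architecture's arch_add_memory() usually creates the
 * kernel mapping for the new range and then hands the pages to this
 * generic layer.  The zone choice below is only an example:
 *
 *      int arch_add_memory(int nid, u64 start, u64 size)
 *      {
 *              struct pglist_data *pgdat = NODE_DATA(nid);
 *              struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
 *              unsigned long start_pfn = start >> PAGE_SHIFT;
 *              unsigned long nr_pages = size >> PAGE_SHIFT;
 *
 *              ... map [start, start + size) in the kernel page tables ...
 *
 *              return __add_pages(nid, zone, start_pfn, nr_pages);
 *      }
 */
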
/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
                 unsigned long nr_pages)
{
        unsigned long i, ret = 0;
        int sections_to_remove;

        /*
         * We can only remove entire sections
         */
        BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
        BUG_ON(nr_pages % PAGES_PER_SECTION);

        release_mem_region(phys_start_pfn << PAGE_SHIFT, nr_pages * PAGE_SIZE);

        sections_to_remove = nr_pages / PAGES_PER_SECTION;
        for (i = 0; i < sections_to_remove; i++) {
                unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
                ret = __remove_section(zone, __pfn_to_section(pfn));
                if (ret)
                        break;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(__remove_pages);

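/*
 * Illustrative usage sketch (an assumption, not a caller in this file):
 * a platform removing a section-aligned range would offline it first and
 * only then tear down the section mappings:
 *
 *      ret = offline_pages(start_pfn, nr_pages);
 *      if (!ret)
 *              ret = __remove_pages(zone, start_pfn, nr_pages);
 */
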
int set_online_page_callback(online_page_callback_t callback)
{
        int rc = -EINVAL;

        lock_memory_hotplug();

        if (online_page_callback == generic_online_page) {
                online_page_callback = callback;
                rc = 0;
        }

        unlock_memory_hotplug();

        return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
        int rc = -EINVAL;

        lock_memory_hotplug();

        if (online_page_callback == callback) {
                online_page_callback = generic_online_page;
                rc = 0;
        }

        unlock_memory_hotplug();

        return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);

void __online_page_set_limits(struct page *page)
{
        unsigned long pfn = page_to_pfn(page);

        if (pfn >= num_physpages)
                num_physpages = pfn + 1;
}
EXPORT_SYMBOL_GPL(__online_page_set_limits);

void __online_page_increment_counters(struct page *page)
{
        totalram_pages++;

#ifdef CONFIG_HIGHMEM
        if (PageHighMem(page))
                totalhigh_pages++;
#endif
}
EXPORT_SYMBOL_GPL(__online_page_increment_counters);

void __online_page_free(struct page *page)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
}
EXPORT_SYMBOL_GPL(__online_page_free);

static void generic_online_page(struct page *page)
{
        __online_page_set_limits(page);
        __online_page_increment_counters(page);
        __online_page_free(page);
}

static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
                        void *arg)
{
        unsigned long i;
        unsigned long onlined_pages = *(unsigned long *)arg;
        struct page *page;
        if (PageReserved(pfn_to_page(start_pfn)))
                for (i = 0; i < nr_pages; i++) {
                        page = pfn_to_page(start_pfn + i);
                        (*online_page_callback)(page);
                        onlined_pages++;
                }
        *(unsigned long *)arg = onlined_pages;
        return 0;
}


int __ref online_pages(unsigned long pfn, unsigned long nr_pages)
{
        unsigned long onlined_pages = 0;
        struct zone *zone;
        int need_zonelists_rebuild = 0;
        int nid;
        int ret;
        struct memory_notify arg;

        lock_memory_hotplug();
        arg.start_pfn = pfn;
        arg.nr_pages = nr_pages;
        arg.status_change_nid = -1;

        nid = page_to_nid(pfn_to_page(pfn));
        if (node_present_pages(nid) == 0)
                arg.status_change_nid = nid;

        ret = memory_notify(MEM_GOING_ONLINE, &arg);
        ret = notifier_to_errno(ret);
        if (ret) {
                memory_notify(MEM_CANCEL_ONLINE, &arg);
                unlock_memory_hotplug();
                return ret;
        }
        /*
         * This doesn't need a lock to do pfn_to_page().
         * The section can't be removed here because of the
         * memory_block->state_mutex.
         */
        zone = page_zone(pfn_to_page(pfn));
        /*
         * If this zone is not populated, it is not in the zonelist and the
         * page allocator ignores it.  So the zonelist must be updated after
         * onlining.
         */
        mutex_lock(&zonelists_mutex);
        if (!populated_zone(zone)) {
                need_zonelists_rebuild = 1;
                build_all_zonelists(NULL, zone);
        }

        ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
                online_pages_range);
        if (ret) {
                if (need_zonelists_rebuild)
                        zone_pcp_reset(zone);
                mutex_unlock(&zonelists_mutex);
                printk(KERN_DEBUG "online_pages [mem %#010llx-%#010llx] failed\n",
                       (unsigned long long) pfn << PAGE_SHIFT,
                       (((unsigned long long) pfn + nr_pages)
                            << PAGE_SHIFT) - 1);
                memory_notify(MEM_CANCEL_ONLINE, &arg);
                unlock_memory_hotplug();
                return ret;
        }

        zone->present_pages += onlined_pages;
        zone->zone_pgdat->node_present_pages += onlined_pages;
        if (onlined_pages) {
                node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
                if (need_zonelists_rebuild)
                        build_all_zonelists(NULL, NULL);
                else
                        zone_pcp_update(zone);
        }

        mutex_unlock(&zonelists_mutex);

        init_per_zone_wmark_min();

        if (onlined_pages)
                kswapd_run(zone_to_nid(zone));

        vm_total_pages = nr_free_pagecache_pages();

        writeback_set_ratelimit();

        if (onlined_pages)
                memory_notify(MEM_ONLINE, &arg);
        unlock_memory_hotplug();

        return 0;
}
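
/*
 * Note: online_pages() is normally reached from user space via a memory
 * block's sysfs state file, handled in drivers/base/memory.c:
 *
 *      # echo online > /sys/devices/system/memory/memoryX/state
 */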
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
        struct pglist_data *pgdat;
        unsigned long zones_size[MAX_NR_ZONES] = {0};
        unsigned long zholes_size[MAX_NR_ZONES] = {0};
        unsigned long start_pfn = start >> PAGE_SHIFT;

        pgdat = arch_alloc_nodedata(nid);
        if (!pgdat)
                return NULL;

        arch_refresh_nodedata(nid, pgdat);

        /* we can use NODE_DATA(nid) from here */

        /* init the node's zones as empty zones; we don't have any present pages */
        free_area_init_node(nid, zones_size, start_pfn, zholes_size);

        /*
         * The node we allocated has no zone fallback lists.  To avoid
         * accessing a not-initialized zonelist, build one here.
         */
        mutex_lock(&zonelists_mutex);
        build_all_zonelists(pgdat, NULL);
        mutex_unlock(&zonelists_mutex);

        return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
        arch_refresh_nodedata(nid, NULL);
        arch_free_nodedata(pgdat);
        return;
}


/*
 * called by cpu_up() to online a node without onlined memory.
 */
int mem_online_node(int nid)
{
        pg_data_t       *pgdat;
        int     ret;

        lock_memory_hotplug();
        pgdat = hotadd_new_pgdat(nid, 0);
        if (!pgdat) {
                ret = -ENOMEM;
                goto out;
        }
        node_set_online(nid);
        ret = register_one_node(nid);
        BUG_ON(ret);

out:
        unlock_memory_hotplug();
        return ret;
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
int __ref add_memory(int nid, u64 start, u64 size)
{
        pg_data_t *pgdat = NULL;
        int new_pgdat = 0;
        struct resource *res;
        int ret;

        lock_memory_hotplug();

        res = register_memory_resource(start, size);
        ret = -EEXIST;
        if (!res)
                goto out;

        if (!node_online(nid)) {
                pgdat = hotadd_new_pgdat(nid, start);
                ret = -ENOMEM;
                if (!pgdat)
                        goto error;
                new_pgdat = 1;
        }

        /* call arch's memory hotadd */
        ret = arch_add_memory(nid, start, size);

        if (ret < 0)
                goto error;

        /* We online the node here; we can't roll back from this point on. */
        node_set_online(nid);

        if (new_pgdat) {
                ret = register_one_node(nid);
                /*
                 * If the sysfs file of the new node can't be created, CPUs
                 * on the node can't be hot-added.  There is no way to roll
                 * back now, so check with BUG_ON() to catch it, reluctantly.
                 */
                BUG_ON(ret);
        }

        /* create new memmap entry */
        firmware_map_add_hotplug(start, start + size, "System RAM");

        goto out;

error:
        /* rollback pgdat allocation and others */
        if (new_pgdat)
                rollback_node_hotadd(nid, pgdat);
        if (res)
                release_memory_resource(res);

out:
        unlock_memory_hotplug();
        return ret;
}
EXPORT_SYMBOL_GPL(add_memory);

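/*
 * Note: add_memory() is the entry point used by platform code such as the
 * ACPI memory hotplug driver.  On architectures that enable
 * CONFIG_ARCH_MEMORY_PROBE it can also be reached from user space through
 * the sysfs probe file; the address below is only an example and must be
 * the physical start of a memory block:
 *
 *      # echo 0x40000000 > /sys/devices/system/memory/probe
 */
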
#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
 * set and the size of the free page is given by page_order(). Using this,
 * the function determines if the pageblock contains only free pages.
 * Due to buddy constraints, a free page at least the size of a pageblock
 * will be located at the start of the pageblock.
 */
static inline int pageblock_free(struct page *page)
{
        return PageBuddy(page) && page_order(page) >= pageblock_order;
}

/* Return the start of the next active pageblock after a given page */
static struct page *next_active_pageblock(struct page *page)
{
        /* Ensure the starting page is pageblock-aligned */
        BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));

        /* If the entire pageblock is free, move to the end of the free page */
        if (pageblock_free(page)) {
                int order;
                /* be careful. we don't have locks, page_order can be changed.*/
                order = page_order(page);
                if ((order < MAX_ORDER) && (order >= pageblock_order))
                        return page + (1 << order);
        }

        return page + pageblock_nr_pages;
}

/* Checks if this range of memory is likely to be hot-removable. */
int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
        struct page *page = pfn_to_page(start_pfn);
        struct page *end_page = page + nr_pages;

        /* Check the starting page of each pageblock within the range */
        for (; page < end_page; page = next_active_pageblock(page)) {
                if (!is_pageblock_removable_nolock(page))
                        return 0;
                cond_resched();
        }

        /* All pageblocks in the memory block are likely to be hot-removable */
        return 1;
}

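/*
 * Note: is_mem_section_removable() backs a memory block's "removable"
 * sysfs attribute (drivers/base/memory.c), so user space can check a
 * block before attempting to offline it:
 *
 *      # cat /sys/devices/system/memory/memoryX/removable
 */
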
/*
 * Confirm that all pages in the range [start_pfn, end_pfn) belong to the
 * same zone.
 */
static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;
        struct zone *zone = NULL;
        struct page *page;
        int i;
        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += MAX_ORDER_NR_PAGES) {
                i = 0;
                /* This is just a CONFIG_HOLES_IN_ZONE check.*/
                while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
                        i++;
                if (i == MAX_ORDER_NR_PAGES)
                        continue;
                page = pfn_to_page(pfn + i);
                if (zone && page_zone(page) != zone)
                        return 0;
                zone = page_zone(page);
        }
        return 1;
}

/*
 * Scanning pfns is much easier than scanning the LRU list.
 * Scan pfns from start to end and return the first pfn of an LRU page.
 */
static unsigned long scan_lru_pages(unsigned long start, unsigned long end)
{
        unsigned long pfn;
        struct page *page;
        for (pfn = start; pfn < end; pfn++) {
                if (pfn_valid(pfn)) {
                        page = pfn_to_page(pfn);
                        if (PageLRU(page))
                                return pfn;
                }
        }
        return 0;
}

#define NR_OFFLINE_AT_ONCE_PAGES        (256)
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;
        struct page *page;
        int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
        int not_managed = 0;
        int ret = 0;
        LIST_HEAD(source);

        for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
                if (!pfn_valid(pfn))
                        continue;
                page = pfn_to_page(pfn);
                if (!get_page_unless_zero(page))
                        continue;
                /*
                 * We can skip free pages.  And we can only deal with pages
                 * on the LRU.
                 */
                ret = isolate_lru_page(page);
                if (!ret) { /* Success */
                        put_page(page);
                        list_add_tail(&page->lru, &source);
                        move_pages--;
                        inc_zone_page_state(page, NR_ISOLATED_ANON +
                                            page_is_file_cache(page));

                } else {
#ifdef CONFIG_DEBUG_VM
                        printk(KERN_ALERT "removing pfn %lx from LRU failed\n",
                               pfn);
                        dump_page(page);
#endif
                        put_page(page);
                        /* Because we don't hold the big zone->lock, we
                           should check this again here. */
                        if (page_count(page)) {
                                not_managed++;
                                ret = -EBUSY;
                                break;
                        }
                }
        }
        if (!list_empty(&source)) {
                if (not_managed) {
                        putback_lru_pages(&source);
                        goto out;
                }

                /*
                 * alloc_migrate_target should be improved!
                 * migrate_pages() returns the number of failed pages.
                 */
                ret = migrate_pages(&source, alloc_migrate_target, 0,
                                                        true, MIGRATE_SYNC);
                if (ret)
                        putback_lru_pages(&source);
        }
out:
        return ret;
}

/*
 * remove from free_area[] and mark all as Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
                        void *data)
{
        __offline_isolated_pages(start, start + nr_pages);
        return 0;
}

static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
        walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
                                offline_isolated_pages_cb);
}

/*
 * Check that all pages in the range, recorded as a memory resource, are
 * isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
                        void *data)
{
        int ret;
        long offlined = *(long *)data;
        ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
        offlined = nr_pages;
        if (!ret)
                *(long *)data += offlined;
        return ret;
}

static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
        long offlined = 0;
        int ret;

        ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
                        check_pages_isolated_cb);
        if (ret < 0)
                offlined = (long)ret;
        return offlined;
}

static int __ref __offline_pages(unsigned long start_pfn,
                  unsigned long end_pfn, unsigned long timeout)
{
        unsigned long pfn, nr_pages, expire;
        long offlined_pages;
        int ret, drain, retry_max, node;
        struct zone *zone;
        struct memory_notify arg;

        BUG_ON(start_pfn >= end_pfn);
        /* at least, alignment against pageblock is necessary */
        if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
                return -EINVAL;
        if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
                return -EINVAL;
        /* This makes hotplug much easier... and readable.
           We assume this for now. */
        if (!test_pages_in_a_zone(start_pfn, end_pfn))
                return -EINVAL;

        lock_memory_hotplug();

        zone = page_zone(pfn_to_page(start_pfn));
        node = zone_to_nid(zone);
        nr_pages = end_pfn - start_pfn;

        /* set the above range as isolated */
        ret = start_isolate_page_range(start_pfn, end_pfn,
                                       MIGRATE_MOVABLE, true);
        if (ret)
                goto out;

        arg.start_pfn = start_pfn;
        arg.nr_pages = nr_pages;
        arg.status_change_nid = -1;
        if (nr_pages >= node_present_pages(node))
                arg.status_change_nid = node;

        ret = memory_notify(MEM_GOING_OFFLINE, &arg);
        ret = notifier_to_errno(ret);
        if (ret)
                goto failed_removal;

        pfn = start_pfn;
        expire = jiffies + timeout;
        drain = 0;
        retry_max = 5;
repeat:
        /* start memory hot removal */
        ret = -EAGAIN;
        if (time_after(jiffies, expire))
                goto failed_removal;
        ret = -EINTR;
        if (signal_pending(current))
                goto failed_removal;
        ret = 0;
        if (drain) {
                lru_add_drain_all();
                cond_resched();
                drain_all_pages();
        }

        pfn = scan_lru_pages(start_pfn, end_pfn);
        if (pfn) { /* We have a page on the LRU */
                ret = do_migrate_range(pfn, end_pfn);
                if (!ret) {
                        drain = 1;
                        goto repeat;
                } else {
                        if (ret < 0)
                                if (--retry_max == 0)
                                        goto failed_removal;
                        yield();
                        drain = 1;
                        goto repeat;
                }
        }
        /* drain all zones' LRU pagevecs; this is asynchronous... */
        lru_add_drain_all();
        yield();
        /* drain pcp pages; this is synchronous. */
        drain_all_pages();
        /* check again */
        offlined_pages = check_pages_isolated(start_pfn, end_pfn);
        if (offlined_pages < 0) {
                ret = -EBUSY;
                goto failed_removal;
        }
        printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
        /* OK, all of our target range is isolated.
           We cannot do a rollback at this point. */
        offline_isolated_pages(start_pfn, end_pfn);
        /* reset pagetype flags and make the migrate type MOVABLE */
        undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
        /* removal success */
        zone->present_pages -= offlined_pages;
        zone->zone_pgdat->node_present_pages -= offlined_pages;
        totalram_pages -= offlined_pages;

        init_per_zone_wmark_min();

        if (!populated_zone(zone)) {
                zone_pcp_reset(zone);
                mutex_lock(&zonelists_mutex);
                build_all_zonelists(NULL, NULL);
                mutex_unlock(&zonelists_mutex);
        } else
                zone_pcp_update(zone);

        if (!node_present_pages(node)) {
                node_clear_state(node, N_HIGH_MEMORY);
                kswapd_stop(node);
        }

        vm_total_pages = nr_free_pagecache_pages();
        writeback_set_ratelimit();

        memory_notify(MEM_OFFLINE, &arg);
        unlock_memory_hotplug();
        return 0;

failed_removal:
        printk(KERN_INFO "memory offlining [mem %#010llx-%#010llx] failed\n",
               (unsigned long long) start_pfn << PAGE_SHIFT,
               ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
        memory_notify(MEM_CANCEL_OFFLINE, &arg);
        /* push back to the free area */
        undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);

out:
        unlock_memory_hotplug();
        return ret;
}

int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
        return __offline_pages(start_pfn, start_pfn + nr_pages, 120 * HZ);
}

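/*
 * Note: offline_pages() is the counterpart of online_pages() and is
 * likewise reached through the memory block's sysfs state file:
 *
 *      # echo offline > /sys/devices/system/memory/memoryX/state
 */
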
int remove_memory(u64 start, u64 size)
{
        struct memory_block *mem = NULL;
        struct mem_section *section;
        unsigned long start_pfn, end_pfn;
        unsigned long pfn, section_nr;
        int ret;

        start_pfn = PFN_DOWN(start);
        end_pfn = start_pfn + PFN_DOWN(size);

        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                section_nr = pfn_to_section_nr(pfn);
                if (!present_section_nr(section_nr))
                        continue;

                section = __nr_to_section(section_nr);
                /* same memblock? */
                if (mem)
                        if ((section_nr >= mem->start_section_nr) &&
                            (section_nr <= mem->end_section_nr))
                                continue;

                mem = find_memory_block_hinted(section, mem);
                if (!mem)
                        continue;

                ret = offline_memory_block(mem);
                if (ret) {
                        kobject_put(&mem->dev.kobj);
                        return ret;
                }
        }

        if (mem)
                kobject_put(&mem->dev.kobj);

        return 0;
}
#else
int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
        return -EINVAL;
}
int remove_memory(u64 start, u64 size)
{
        return -EINVAL;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
EXPORT_SYMBOL_GPL(remove_memory);