/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
#include <asm/vdso.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) do { if (numa_debug) printk(KERN_INFO args); } while (0)
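/* Debug output is enabled by booting with "numa=debug"; see early_numa(). */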

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const unsigned int *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
        unsigned int node;

        /* setup nr_node_ids if not done yet */
        if (nr_node_ids == MAX_NUMNODES)
                setup_nr_node_ids();

        /* allocate the map */
        for (node = 0; node < nr_node_ids; node++)
                alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

        /* cpumask_of_node() will now work */
        dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}

static int __init fake_numa_create_new_node(unsigned long end_pfn,
                                                unsigned int *nid)
{
        unsigned long long mem;
        char *p = cmdline;
        static unsigned int fake_nid;
        static unsigned long long curr_boundary;

        /*
         * Modify the node id only if we have already started creating
         * fake NUMA nodes; we want to continue from where we left off
         * the last time.
         */
        if (fake_nid)
                *nid = fake_nid;
        /*
         * In case there are no more arguments to parse, the
         * node_id should be the same as the last fake node id
         * (we've handled this above).
         */
        if (!p)
                return 0;

        mem = memparse(p, &p);
        if (!mem)
                return 0;

        if (mem < curr_boundary)
                return 0;

        curr_boundary = mem;

        if ((end_pfn << PAGE_SHIFT) > mem) {
                /*
                 * Skip commas and spaces
                 */
                while (*p == ',' || *p == ' ' || *p == '\t')
                        p++;

                cmdline = p;
                fake_nid++;
                *nid = fake_nid;
                dbg("created new fake_node with id %d\n", fake_nid);
                return 1;
        }
        return 0;
}
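
/*
 * Illustrative example (boundary values are hypothetical): booting with
 * "numa=fake=1G,2G" sets cmdline to "1G,2G".  Memory below the 1GB
 * boundary keeps its original node id; the first region ending above
 * 1GB becomes fake node 1, and the first region ending above 2GB
 * becomes fake node 2.
 */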

/*
 * get_node_active_region - Return active region containing pfn
 * Active range returned is empty if none found.
 * @pfn: The page to return the region for
 * @node_ar: Returned set to the active region containing @pfn
 */
static void __init get_node_active_region(unsigned long pfn,
                                          struct node_active_region *node_ar)
{
        unsigned long start_pfn, end_pfn;
        int i, nid;

        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
                if (pfn >= start_pfn && pfn < end_pfn) {
                        node_ar->nid = nid;
                        node_ar->start_pfn = start_pfn;
                        node_ar->end_pfn = end_pfn;
                        break;
                }
        }
}

static void map_cpu_to_node(int cpu, int node)
{
        numa_cpu_lookup_table[cpu] = node;

        dbg("adding cpu %d to node %d\n", cpu, node);

        if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
                cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
        int node = numa_cpu_lookup_table[cpu];

        dbg("removing cpu %lu from node %d\n", cpu, node);

        if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
                cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
        } else {
                printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
                       cpu, node);
        }
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */

/* must hold reference to node during call */
static const unsigned int *of_get_associativity(struct device_node *dev)
{
        return of_get_property(dev, "ibm,associativity", NULL);
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const u32 *of_get_usable_memory(struct device_node *memory)
{
        const u32 *prop;
        u32 len;
        prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
        if (!prop || len < sizeof(unsigned int))
                return NULL;
        return prop;
}

int __node_distance(int a, int b)
{
        int i;
        int distance = LOCAL_DISTANCE;

        if (!form1_affinity)
                return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

        for (i = 0; i < distance_ref_points_depth; i++) {
                if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
                        break;

                /* Double the distance for each NUMA level */
                distance *= 2;
        }

        return distance;
}
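
/*
 * Worked example (illustrative): with LOCAL_DISTANCE == 10 and two
 * reference points, nodes whose associativity matches at the first
 * (most significant) level report 10, nodes that first match at the
 * second level report 20, and nodes that match at no level report 40.
 */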

static void initialize_distance_lookup_table(int nid,
                const unsigned int *associativity)
{
        int i;

        if (!form1_affinity)
                return;

        for (i = 0; i < distance_ref_points_depth; i++) {
                distance_lookup_table[nid][i] =
                        associativity[distance_ref_points[i]];
        }
}

/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
static int associativity_to_nid(const unsigned int *associativity)
{
        int nid = -1;

        if (min_common_depth == -1)
                goto out;

        if (associativity[0] >= min_common_depth)
                nid = associativity[min_common_depth];

        /* POWER4 LPAR uses 0xffff as invalid node */
        if (nid == 0xffff || nid >= MAX_NUMNODES)
                nid = -1;

        if (nid > 0 && associativity[0] >= distance_ref_points_depth)
                initialize_distance_lookup_table(nid, associativity);

out:
        return nid;
}

/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
        int nid = -1;
        const unsigned int *tmp;

        tmp = of_get_associativity(device);
        if (tmp)
                nid = associativity_to_nid(tmp);
        return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
        struct device_node *tmp;
        int nid = -1;

        of_node_get(device);
        while (device) {
                nid = of_node_to_nid_single(device);
                if (nid != -1)
                        break;

                tmp = device;
                device = of_get_parent(tmp);
                of_node_put(tmp);
        }
        of_node_put(device);

        return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);

static int __init find_min_common_depth(void)
{
        int depth;
        struct device_node *root;

        if (firmware_has_feature(FW_FEATURE_OPAL))
                root = of_find_node_by_path("/ibm,opal");
        else
                root = of_find_node_by_path("/rtas");
        if (!root)
                root = of_find_node_by_path("/");

        /*
         * This property is a set of 32-bit integers, each representing
         * an index into the ibm,associativity nodes.
         *
         * With form 0 affinity the first integer is for an SMP configuration
         * (should be all 0's) and the second is for a normal NUMA
         * configuration. We have only one level of NUMA.
         *
         * With form 1 affinity the first integer is the most significant
         * NUMA boundary and the following are progressively less significant
         * boundaries. There can be more than one level of NUMA.
         */
        distance_ref_points = of_get_property(root,
                                        "ibm,associativity-reference-points",
                                        &distance_ref_points_depth);

        if (!distance_ref_points) {
                dbg("NUMA: ibm,associativity-reference-points not found.\n");
                goto err;
        }

        distance_ref_points_depth /= sizeof(int);

        if (firmware_has_feature(FW_FEATURE_OPAL) ||
            firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
                dbg("Using form 1 affinity\n");
                form1_affinity = 1;
        }

        if (form1_affinity) {
                depth = distance_ref_points[0];
        } else {
                if (distance_ref_points_depth < 2) {
                        printk(KERN_WARNING "NUMA: "
                                "short ibm,associativity-reference-points\n");
                        goto err;
                }

                depth = distance_ref_points[1];
        }

        /*
         * Warn and cap if the hardware supports more than
         * MAX_DISTANCE_REF_POINTS domains.
         */
        if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
                printk(KERN_WARNING "NUMA: distance array capped at "
                        "%d entries\n", MAX_DISTANCE_REF_POINTS);
                distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
        }

        of_node_put(root);
        return depth;

err:
        of_node_put(root);
        return -1;
}

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
        struct device_node *memory = NULL;

        memory = of_find_node_by_type(memory, "memory");
        if (!memory)
                panic("numa.c: No memory nodes found!");

        *n_addr_cells = of_n_addr_cells(memory);
        *n_size_cells = of_n_size_cells(memory);
        of_node_put(memory);
}

static unsigned long read_n_cells(int n, const unsigned int **buf)
{
        unsigned long result = 0;

        while (n--) {
                result = (result << 32) | **buf;
                (*buf)++;
        }
        return result;
}
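
/*
 * Worked example: with n == 2 and *buf pointing at the cells
 * { 0x00000001, 0x00000000 }, read_n_cells() returns 0x100000000
 * and advances *buf past both cells.
 */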

/*
 * Read the next memblock list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */
static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
{
        const u32 *cp;

        drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

        cp = *cellp;
        drmem->drc_index = cp[0];
        drmem->reserved = cp[1];
        drmem->aa_index = cp[2];
        drmem->flags = cp[3];

        *cellp = cp + 4;
}
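
/*
 * Cell layout of one ibm,dynamic-memory entry, as consumed above:
 * base_addr (n_mem_addr_cells cells), then drc_index, reserved,
 * aa_index and flags (one cell each).
 */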

/*
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 *
 * The layout of the ibm,dynamic-memory property is a count N followed by
 * N memblock list entries.  Each memblock list entry contains information
 * as laid out in the of_drconf_cell struct above.
 */
static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
{
        const u32 *prop;
        u32 len, entries;

        prop = of_get_property(memory, "ibm,dynamic-memory", &len);
        if (!prop || len < sizeof(unsigned int))
                return 0;

        entries = *prop++;

        /* Now that we know the number of entries, revalidate the size
         * of the property read in to ensure we have everything
         */
        if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
                return 0;

        *dm = prop;
        return entries;
}

/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */
static u64 of_get_lmb_size(struct device_node *memory)
{
        const u32 *prop;
        u32 len;

        prop = of_get_property(memory, "ibm,lmb-size", &len);
        if (!prop || len < sizeof(unsigned int))
                return 0;

        return read_n_cells(n_mem_size_cells, &prop);
}

struct assoc_arrays {
        u32     n_arrays;
        u32     array_sz;
        const u32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct device_node *memory,
                               struct assoc_arrays *aa)
{
        const u32 *prop;
        u32 len;

        prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
        if (!prop || len < 2 * sizeof(unsigned int))
                return -1;

        aa->n_arrays = *prop++;
        aa->array_sz = *prop++;

        /* Now that we know the number of arrays and size of each array,
         * revalidate the size of the property read in.
         */
        if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
                return -1;

        aa->arrays = prop;
        return 0;
}
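
/*
 * Example property layout (illustrative values): N = 2, M = 4 gives
 * the cells { 2, 4, a0, a1, a2, a3, b0, b1, b2, b3 }, i.e. two
 * associativity arrays of four cells each starting at aa->arrays.
 */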

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
                                   struct assoc_arrays *aa)
{
        int default_nid = 0;
        int nid = default_nid;
        int index;

        if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
            !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
            drmem->aa_index < aa->n_arrays) {
                index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
                nid = aa->arrays[index];

                if (nid == 0xffff || nid >= MAX_NUMNODES)
                        nid = default_nid;
        }

        return nid;
}
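
/*
 * Index arithmetic, worked through with illustrative values: for
 * array_sz == 4, min_common_depth == 2 and drmem->aa_index == 3, the
 * node id is read from aa->arrays[3 * 4 + 2 - 1] == aa->arrays[13].
 */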

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
        int nid = 0;
        struct device_node *cpu = of_get_cpu_node(lcpu, NULL);

        if (!cpu) {
                WARN_ON(1);
                goto out;
        }

        nid = of_node_to_nid_single(cpu);

        if (nid < 0 || !node_online(nid))
                nid = first_online_node;
out:
        map_cpu_to_node(lcpu, nid);

        of_node_put(cpu);

        return nid;
}

static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
                             void *hcpu)
{
        unsigned long lcpu = (unsigned long)hcpu;
        int ret = NOTIFY_DONE;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                numa_setup_cpu(lcpu);
                ret = NOTIFY_OK;
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                unmap_cpu_from_node(lcpu);
                ret = NOTIFY_OK;
                break;
#endif
        }
        return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
                                                      unsigned long size)
{
        /*
         * We use memblock_end_of_DRAM() in here instead of memory_limit because
         * we've already adjusted it for the limit and it takes care of
         * having memory holes below the limit.  Also, in the case of
         * iommu_is_off, memory_limit is not set but is implicitly enforced.
         */

        if (start + size <= memblock_end_of_DRAM())
                return size;

        if (start >= memblock_end_of_DRAM())
                return 0;

        return memblock_end_of_DRAM() - start;
}
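
/*
 * Worked example (illustrative addresses): if DRAM ends at 0x80000000,
 * a region starting at 0x7c000000 with size 0x08000000 is truncated to
 * 0x80000000 - 0x7c000000 == 0x04000000, and a region starting at or
 * above 0x80000000 is discarded entirely.
 */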

/*
 * Reads the counter for a given entry in
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const u32 **usm)
{
        /*
         * For each lmb in ibm,dynamic-memory a corresponding
         * entry in linux,drconf-usable-memory contains a counter
         * followed by that many (base, size) pairs.  Read the
         * counter from linux,drconf-usable-memory.
         */
        return read_n_cells(n_mem_size_cells, usm);
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node.  This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
        const u32 *uninitialized_var(dm), *usm;
        unsigned int n, rc, ranges, is_kexec_kdump = 0;
        unsigned long lmb_size, base, size, sz;
        int nid;
        struct assoc_arrays aa = { .arrays = NULL };

        n = of_get_drconf_memory(memory, &dm);
        if (!n)
                return;

        lmb_size = of_get_lmb_size(memory);
        if (!lmb_size)
                return;

        rc = of_get_assoc_arrays(memory, &aa);
        if (rc)
                return;

        /* check if this is a kexec/kdump kernel */
        usm = of_get_usable_memory(memory);
        if (usm != NULL)
                is_kexec_kdump = 1;

        for (; n != 0; --n) {
                struct of_drconf_cell drmem;

                read_drconf_cell(&drmem, &dm);

                /* skip this block if the reserved bit is set in flags (0x80)
                   or if the block is not assigned to this partition (0x8) */
                if ((drmem.flags & DRCONF_MEM_RESERVED)
                    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
                        continue;

                base = drmem.base_addr;
                size = lmb_size;
                ranges = 1;

                if (is_kexec_kdump) {
                        ranges = read_usm_ranges(&usm);
                        if (!ranges) /* there are no (base, size) pairs */
                                continue;
                }
                do {
                        if (is_kexec_kdump) {
                                base = read_n_cells(n_mem_addr_cells, &usm);
                                size = read_n_cells(n_mem_size_cells, &usm);
                        }
                        nid = of_drconf_to_nid_single(&drmem, &aa);
                        fake_numa_create_new_node(
                                ((base + size) >> PAGE_SHIFT), &nid);
                        node_set_online(nid);
                        sz = numa_enforce_memory_limit(base, size);
                        if (sz)
                                memblock_set_node(base, sz, nid);
                } while (--ranges);
        }
}

static int __init parse_numa_properties(void)
{
        struct device_node *memory;
        int default_nid = 0;
        unsigned long i;

        if (numa_enabled == 0) {
                printk(KERN_WARNING "NUMA disabled by user\n");
                return -1;
        }

        min_common_depth = find_min_common_depth();

        if (min_common_depth < 0)
                return min_common_depth;

        dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

        /*
         * Even though we connect cpus to numa domains later in SMP
         * init, we need to know the node ids now. This is because
         * each node to be onlined must have NODE_DATA etc backing it.
         */
        for_each_present_cpu(i) {
                struct device_node *cpu;
                int nid;

                cpu = of_get_cpu_node(i, NULL);
                BUG_ON(!cpu);
                nid = of_node_to_nid_single(cpu);
                of_node_put(cpu);

                /*
                 * Don't fall back to default_nid yet -- we will plug
                 * cpus into nodes once the memory scan has discovered
                 * the topology.
                 */
                if (nid < 0)
                        continue;
                node_set_online(nid);
        }

        get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

        for_each_node_by_type(memory, "memory") {
                unsigned long start;
                unsigned long size;
                int nid;
                int ranges;
                const unsigned int *memcell_buf;
                unsigned int len;

                memcell_buf = of_get_property(memory,
                        "linux,usable-memory", &len);
                if (!memcell_buf || len <= 0)
                        memcell_buf = of_get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                /* ranges in cells */
                ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
                /* these are order-sensitive, and modify the buffer pointer */
                start = read_n_cells(n_mem_addr_cells, &memcell_buf);
                size = read_n_cells(n_mem_size_cells, &memcell_buf);

                /*
                 * Assumption: either all memory nodes or none will
                 * have associativity properties.  If none, then
                 * everything goes to default_nid.
                 */
                nid = of_node_to_nid_single(memory);
                if (nid < 0)
                        nid = default_nid;

                fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
                node_set_online(nid);

                if (!(size = numa_enforce_memory_limit(start, size))) {
                        if (--ranges)
                                goto new_range;
                        else
                                continue;
                }

                memblock_set_node(start, size, nid);

                if (--ranges)
                        goto new_range;
        }

        /*
         * Now do the same thing for each MEMBLOCK listed in the
         * ibm,dynamic-memory property in the
         * ibm,dynamic-reconfiguration-memory node.
         */
        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory)
                parse_drconf_memory(memory);

        return 0;
}

static void __init setup_nonnuma(void)
{
        unsigned long top_of_ram = memblock_end_of_DRAM();
        unsigned long total_ram = memblock_phys_mem_size();
        unsigned long start_pfn, end_pfn;
        unsigned int nid = 0;
        struct memblock_region *reg;

        printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
               top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (top_of_ram - total_ram) >> 20);

        for_each_memblock(memory, reg) {
                start_pfn = memblock_region_memory_base_pfn(reg);
                end_pfn = memblock_region_memory_end_pfn(reg);

                fake_numa_create_new_node(end_pfn, &nid);
                memblock_set_node(PFN_PHYS(start_pfn),
                                  PFN_PHYS(end_pfn - start_pfn), nid);
                node_set_online(nid);
        }
}

void __init dump_numa_cpu_topology(void)
{
        unsigned int node;
        unsigned int cpu, count;

        if (min_common_depth == -1 || !numa_enabled)
                return;

        for_each_online_node(node) {
                printk(KERN_DEBUG "Node %d CPUs:", node);

                count = 0;
                /*
                 * If we used a CPU iterator here we would miss printing
                 * the holes in the cpumap.
                 */
                for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
                        if (cpumask_test_cpu(cpu,
                                        node_to_cpumask_map[node])) {
                                if (count == 0)
                                        printk(" %u", cpu);
                                ++count;
                        } else {
                                if (count > 1)
                                        printk("-%u", cpu - 1);
                                count = 0;
                        }
                }

                if (count > 1)
                        printk("-%u", nr_cpu_ids - 1);
                printk("\n");
        }
}

static void __init dump_numa_memory_topology(void)
{
        unsigned int node;
        unsigned int count;

        if (min_common_depth == -1 || !numa_enabled)
                return;

        for_each_online_node(node) {
                unsigned long i;

                printk(KERN_DEBUG "Node %d Memory:", node);

                count = 0;

                for (i = 0; i < memblock_end_of_DRAM();
                     i += (1 << SECTION_SIZE_BITS)) {
                        if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
                                if (count == 0)
                                        printk(" 0x%lx", i);
                                ++count;
                        } else {
                                if (count > 0)
                                        printk("-0x%lx", i);
                                count = 0;
                        }
                }

                if (count > 0)
                        printk("-0x%lx", i);
                printk("\n");
        }
}

/*
 * Allocate some memory, using the memblock or bootmem allocator as
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the virtual address of the memory.
 */
static void __init *careful_zallocation(int nid, unsigned long size,
                                       unsigned long align,
                                       unsigned long end_pfn)
{
        void *ret;
        int new_nid;
        unsigned long ret_paddr;

        ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);

        /* retry over all memory */
        if (!ret_paddr)
                ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());

        if (!ret_paddr)
                panic("numa.c: cannot allocate %lu bytes for node %d",
                      size, nid);

        ret = __va(ret_paddr);

        /*
         * We initialize the nodes in numeric order: 0, 1, 2...
         * and hand over control from the MEMBLOCK allocator to the
         * bootmem allocator.  If this function is called for
         * node 5, then we know that all nodes <5 are using the
         * bootmem allocator instead of the MEMBLOCK allocator.
         *
         * So, check the nid from which this allocation came
         * and double check to see if we need to use bootmem
         * instead of the MEMBLOCK.  We don't free the MEMBLOCK memory
         * since it would be useless.
         */
        new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
        if (new_nid < nid) {
                ret = __alloc_bootmem_node(NODE_DATA(new_nid),
                                size, align, 0);

                dbg("alloc_bootmem %p %lx\n", ret, size);
        }

        memset(ret, 0, size);
        return ret;
}

static struct notifier_block ppc64_numa_nb = {
        .notifier_call = cpu_numa_callback,
        .priority = 1 /* Must run before sched domains notifier. */
};

static void __init mark_reserved_regions_for_nid(int nid)
{
        struct pglist_data *node = NODE_DATA(nid);
        struct memblock_region *reg;

        for_each_memblock(reserved, reg) {
                unsigned long physbase = reg->base;
                unsigned long size = reg->size;
                unsigned long start_pfn = physbase >> PAGE_SHIFT;
                unsigned long end_pfn = PFN_UP(physbase + size);
                struct node_active_region node_ar;
                unsigned long node_end_pfn = node->node_start_pfn +
                                             node->node_spanned_pages;

                /*
                 * Check to make sure that this memblock.reserved area is
                 * within the bounds of the node that we care about.
                 * Checking the nid of the start and end points is not
                 * sufficient because the reserved area could span the
                 * entire node.
                 */
                if (end_pfn <= node->node_start_pfn ||
                    start_pfn >= node_end_pfn)
                        continue;

                get_node_active_region(start_pfn, &node_ar);
                while (start_pfn < end_pfn &&
                        node_ar.start_pfn < node_ar.end_pfn) {
                        unsigned long reserve_size = size;
                        /*
                         * if reserved region extends past active region
                         * then trim size to active region
                         */
                        if (end_pfn > node_ar.end_pfn)
                                reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
                                        - physbase;
                        /*
                         * Only worry about *this* node, others may not
                         * yet have valid NODE_DATA().
                         */
                        if (node_ar.nid == nid) {
                                dbg("reserve_bootmem %lx %lx nid=%d\n",
                                        physbase, reserve_size, node_ar.nid);
                                reserve_bootmem_node(NODE_DATA(node_ar.nid),
                                                physbase, reserve_size,
                                                BOOTMEM_DEFAULT);
                        }
                        /*
                         * if reserved region is contained in the active region
                         * then done.
                         */
                        if (end_pfn <= node_ar.end_pfn)
                                break;

                        /*
                         * reserved region extends past the active region
                         *   get next active region that contains this
                         *   reserved region
                         */
                        start_pfn = node_ar.end_pfn;
                        physbase = start_pfn << PAGE_SHIFT;
                        size = size - reserve_size;
                        get_node_active_region(start_pfn, &node_ar);
                }
        }
}

void __init do_init_bootmem(void)
{
        int nid;

        min_low_pfn = 0;
        max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
        max_pfn = max_low_pfn;

        if (parse_numa_properties())
                setup_nonnuma();
        else
                dump_numa_memory_topology();

        for_each_online_node(nid) {
                unsigned long start_pfn, end_pfn;
                void *bootmem_vaddr;
                unsigned long bootmap_pages;

                get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

                /*
                 * Allocate the node structure node local if possible
                 *
                 * Be careful moving this around, as it relies on all
                 * previous nodes' bootmem to be initialized and have
                 * all reserved areas marked.
                 */
                NODE_DATA(nid) = careful_zallocation(nid,
                                        sizeof(struct pglist_data),
                                        SMP_CACHE_BYTES, end_pfn);

                dbg("node %d\n", nid);
                dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

                NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
                NODE_DATA(nid)->node_start_pfn = start_pfn;
                NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

                if (NODE_DATA(nid)->node_spanned_pages == 0)
                        continue;

                dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
                dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

                bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
                bootmem_vaddr = careful_zallocation(nid,
                                        bootmap_pages << PAGE_SHIFT,
                                        PAGE_SIZE, end_pfn);

                dbg("bootmap_vaddr = %p\n", bootmem_vaddr);

                init_bootmem_node(NODE_DATA(nid),
                                  __pa(bootmem_vaddr) >> PAGE_SHIFT,
                                  start_pfn, end_pfn);

                free_bootmem_with_active_regions(nid, end_pfn);
                /*
                 * Be very careful about moving this around.  Future
                 * calls to careful_zallocation() depend on this getting
                 * done correctly.
                 */
                mark_reserved_regions_for_nid(nid);
                sparse_memory_present_with_active_regions(nid);
        }

        init_bootmem_done = 1;

        /*
         * Now bootmem is initialised we can create the node to cpumask
         * lookup tables and setup the cpu callback to populate them.
         */
        setup_node_to_cpumask_map();

        register_cpu_notifier(&ppc64_numa_nb);
        cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
                          (void *)(unsigned long)boot_cpuid);
}

void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
        free_area_init_nodes(max_zone_pfns);
}

static int __init early_numa(char *p)
{
        if (!p)
                return 0;

        if (strstr(p, "off"))
                numa_enabled = 0;

        if (strstr(p, "debug"))
                numa_debug = 1;

        p = strstr(p, "fake=");
        if (p)
                cmdline = p + strlen("fake=");

        return 0;
}
early_param("numa", early_numa);
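
/*
 * Accepted forms of the boot option, as parsed above with strstr():
 * "numa=off", "numa=debug" and "numa=fake=<size>[,<size>...]", and
 * combinations such as "numa=debug,fake=1G" (sizes are illustrative).
 */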

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
                                     unsigned long scn_addr)
{
        const u32 *dm;
        unsigned int drconf_cell_cnt, rc;
        unsigned long lmb_size;
        struct assoc_arrays aa;
        int nid = -1;

        drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
        if (!drconf_cell_cnt)
                return -1;

        lmb_size = of_get_lmb_size(memory);
        if (!lmb_size)
                return -1;

        rc = of_get_assoc_arrays(memory, &aa);
        if (rc)
                return -1;

        for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
                struct of_drconf_cell drmem;

                read_drconf_cell(&drmem, &dm);

                /* skip this block if it is reserved or not assigned to
                 * this partition */
                if ((drmem.flags & DRCONF_MEM_RESERVED)
                    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
                        continue;

                if ((scn_addr < drmem.base_addr)
                    || (scn_addr >= (drmem.base_addr + lmb_size)))
                        continue;

                nid = of_drconf_to_nid_single(&drmem, &aa);
                break;
        }

        return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
        struct device_node *memory;
        int nid = -1;

        for_each_node_by_type(memory, "memory") {
                unsigned long start, size;
                int ranges;
                const unsigned int *memcell_buf;
                unsigned int len;

                memcell_buf = of_get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                /* ranges in cells */
                ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

                while (ranges--) {
                        start = read_n_cells(n_mem_addr_cells, &memcell_buf);
                        size = read_n_cells(n_mem_size_cells, &memcell_buf);

                        if ((scn_addr < start) || (scn_addr >= (start + size)))
                                continue;

                        nid = of_node_to_nid_single(memory);
                        break;
                }

                if (nid >= 0)
                        break;
        }

        of_node_put(memory);

        return nid;
}

/*
 * Find the node associated with a hot added memory section.  Section
 * corresponds to a SPARSEMEM section, not a memblock.  It is assumed
 * that sections are fully contained within a single memblock.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
        struct device_node *memory = NULL;
        int nid, found = 0;

        if (!numa_enabled || (min_common_depth < 0))
                return first_online_node;

        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory) {
                nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
                of_node_put(memory);
        } else {
                nid = hot_add_node_scn_to_nid(scn_addr);
        }

        if (nid < 0 || !node_online(nid))
                nid = first_online_node;

        if (NODE_DATA(nid)->node_spanned_pages)
                return nid;

        for_each_online_node(nid) {
                if (NODE_DATA(nid)->node_spanned_pages) {
                        found = 1;
                        break;
                }
        }

        BUG_ON(!found);
        return nid;
}

static u64 hot_add_drconf_memory_max(void)
{
        struct device_node *memory = NULL;
        unsigned int drconf_cell_cnt = 0;
        u64 lmb_size = 0;
        const u32 *dm = NULL;

        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory) {
                drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
                lmb_size = of_get_lmb_size(memory);
                of_node_put(memory);
        }
        return lmb_size * drconf_cell_cnt;
}

/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
        return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
struct topology_update_data {
        struct topology_update_data *next;
        unsigned int cpu;
        int old_nid;
        int new_nid;
};

static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static int prrn_enabled;
static void reset_topology_timer(void);

/*
 * Store the current values of the associativity change counters
 * reported by the hypervisor.
 */
static void setup_cpu_associativity_change_counters(void)
{
        int cpu;

        /* The VPHN feature supports a maximum of 8 reference points */
        BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);

        for_each_possible_cpu(cpu) {
                int i;
                u8 *counts = vphn_cpu_change_counts[cpu];
                volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

                for (i = 0; i < distance_ref_points_depth; i++)
                        counts[i] = hypervisor_counts[i];
        }
}

/*
 * The hypervisor maintains a set of 8 associativity change counters in
 * the VPA of each cpu that correspond to the associativity levels in the
 * ibm,associativity-reference-points property. When an associativity
 * level changes, the corresponding counter is incremented.
 *
 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
 * node associativity levels have changed.
 *
 * Returns the number of cpus with unhandled associativity changes.
 */
static int update_cpu_associativity_changes_mask(void)
{
        int cpu;
        cpumask_t *changes = &cpu_associativity_changes_mask;

        for_each_possible_cpu(cpu) {
                int i, changed = 0;
                u8 *counts = vphn_cpu_change_counts[cpu];
                volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

                for (i = 0; i < distance_ref_points_depth; i++) {
                        if (hypervisor_counts[i] != counts[i]) {
                                counts[i] = hypervisor_counts[i];
                                changed = 1;
                        }
                }
                if (changed) {
                        cpumask_or(changes, changes, cpu_sibling_mask(cpu));
                        cpu = cpu_last_thread_sibling(cpu);
                }
        }

        return cpumask_weight(changes);
}

/*
 * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
 * the complete property we have to add the length in the first cell.
 */
#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)
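
/*
 * That is, 6 * 8 / 4 + 1 == 13 u32 cells: one length cell followed by
 * up to 12 unpacked associativity values.
 */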

/*
 * Convert the associativity domain numbers returned from the hypervisor
 * to the sequence they would appear in the ibm,associativity property.
 */
static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
{
        int i, nr_assoc_doms = 0;
        const u16 *field = (const u16 *) packed;

#define VPHN_FIELD_UNUSED       (0xffff)
#define VPHN_FIELD_MSB          (0x8000)
#define VPHN_FIELD_MASK         (~VPHN_FIELD_MSB)

        for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
                if (*field == VPHN_FIELD_UNUSED) {
                        /* All significant fields processed, and remaining
                         * fields contain the reserved value of all 1's.
                         * Just store them.
                         */
                        unpacked[i] = *((u32 *)field);
                        field += 2;
                } else if (*field & VPHN_FIELD_MSB) {
                        /* Data is in the lower 15 bits of this field */
                        unpacked[i] = *field & VPHN_FIELD_MASK;
                        field++;
                        nr_assoc_doms++;
                } else {
                        /* Data is in the lower 15 bits of this field
                         * concatenated with the next 16 bit field
                         */
                        unpacked[i] = *((u32 *)field);
                        field += 2;
                        nr_assoc_doms++;
                }
        }

        /* The first cell contains the length of the property */
        unpacked[0] = nr_assoc_doms;

        return nr_assoc_doms;
}
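
/*
 * Packed field format, as decoded above: a 16-bit field of 0xffff is
 * unused padding; a field with the MSB set (e.g. 0x8002) carries a
 * 15-bit domain number (here 2); a field with the MSB clear is the
 * high half of a full 32-bit domain number spanning two fields.
 * (Example values are illustrative.)
 */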

/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
{
        long rc;
        long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
        u64 flags = 1;
        int hwcpu = get_hard_smp_processor_id(cpu);

        rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
        vphn_unpack_associativity(retbuf, associativity);

        return rc;
}

static long vphn_get_associativity(unsigned long cpu,
                                        unsigned int *associativity)
{
        long rc;

        rc = hcall_vphn(cpu, associativity);

        switch (rc) {
        case H_FUNCTION:
                printk(KERN_INFO
                        "VPHN is not supported. Disabling polling...\n");
                stop_topology_update();
                break;
        case H_HARDWARE:
                printk(KERN_ERR
                        "hcall_vphn() experienced a hardware fault "
                        "preventing VPHN. Disabling polling...\n");
                stop_topology_update();
        }

        return rc;
}

/*
 * Update the CPU maps and sysfs entries for a single CPU when its NUMA
 * characteristics change. This function doesn't perform any locking and is
 * only safe to call from stop_machine().
 */
static int update_cpu_topology(void *data)
{
        struct topology_update_data *update;
        unsigned long cpu;

        if (!data)
                return -EINVAL;

        cpu = smp_processor_id();

        for (update = data; update; update = update->next) {
                if (cpu != update->cpu)
                        continue;

                unmap_cpu_from_node(update->cpu);
                map_cpu_to_node(update->cpu, update->new_nid);
                vdso_getcpu_init();
        }

        return 0;
}

/*
 * Update the node maps and sysfs entries for each cpu whose home node
 * has changed. Returns 1 when the topology has changed, and 0 otherwise.
 */
int arch_update_cpu_topology(void)
{
        unsigned int cpu, sibling, changed = 0;
        struct topology_update_data *updates, *ud;
        unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
        cpumask_t updated_cpus;
        struct device *dev;
        int weight, new_nid, i = 0;

        weight = cpumask_weight(&cpu_associativity_changes_mask);
        if (!weight)
                return 0;

        updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL);
        if (!updates)
                return 0;

        cpumask_clear(&updated_cpus);

        for_each_cpu(cpu, &cpu_associativity_changes_mask) {
                /*
                 * If siblings aren't flagged for changes, the updates list
                 * will be too short. Skip this cpu for now and flag its
                 * siblings for the next update.
                 */
                if (!cpumask_subset(cpu_sibling_mask(cpu),
                                        &cpu_associativity_changes_mask)) {
                        pr_info("Sibling bits not set for associativity "
                                        "change, cpu%d\n", cpu);
                        cpumask_or(&cpu_associativity_changes_mask,
                                        &cpu_associativity_changes_mask,
                                        cpu_sibling_mask(cpu));
                        cpu = cpu_last_thread_sibling(cpu);
                        continue;
                }

                /* Use associativity from first thread for all siblings */
                vphn_get_associativity(cpu, associativity);
                new_nid = associativity_to_nid(associativity);
                if (new_nid < 0 || !node_online(new_nid))
                        new_nid = first_online_node;

                if (new_nid == numa_cpu_lookup_table[cpu]) {
                        cpumask_andnot(&cpu_associativity_changes_mask,
                                        &cpu_associativity_changes_mask,
                                        cpu_sibling_mask(cpu));
                        cpu = cpu_last_thread_sibling(cpu);
                        continue;
                }

                for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
                        ud = &updates[i++];
                        ud->cpu = sibling;
                        ud->new_nid = new_nid;
                        ud->old_nid = numa_cpu_lookup_table[sibling];
                        cpumask_set_cpu(sibling, &updated_cpus);
                        if (i < weight)
                                ud->next = &updates[i];
                }
                cpu = cpu_last_thread_sibling(cpu);
        }

        stop_machine(update_cpu_topology, &updates[0], &updated_cpus);

        for (ud = &updates[0]; ud; ud = ud->next) {
                unregister_cpu_under_node(ud->cpu, ud->old_nid);
                register_cpu_under_node(ud->cpu, ud->new_nid);

                dev = get_cpu_device(ud->cpu);
                if (dev)
                        kobject_uevent(&dev->kobj, KOBJ_CHANGE);
                cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
                changed = 1;
        }

        kfree(updates);
        return changed;
}

static void topology_work_fn(struct work_struct *work)
{
        rebuild_sched_domains();
}
static DECLARE_WORK(topology_work, topology_work_fn);

void topology_schedule_update(void)
{
        schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
        if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
                topology_schedule_update();
        else if (vphn_enabled) {
                if (update_cpu_associativity_changes_mask() > 0)
                        topology_schedule_update();
                reset_topology_timer();
        }
}
static struct timer_list topology_timer =
        TIMER_INITIALIZER(topology_timer_fn, 0, 0);

static void reset_topology_timer(void)
{
        topology_timer.data = 0;
        topology_timer.expires = jiffies + 60 * HZ;
        mod_timer(&topology_timer, topology_timer.expires);
}

#ifdef CONFIG_SMP

static void stage_topology_update(int core_id)
{
        cpumask_or(&cpu_associativity_changes_mask,
                &cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
        reset_topology_timer();
}

static int dt_update_callback(struct notifier_block *nb,
                                unsigned long action, void *data)
{
        struct of_prop_reconfig *update;
        int rc = NOTIFY_DONE;

        switch (action) {
        case OF_RECONFIG_UPDATE_PROPERTY:
                update = (struct of_prop_reconfig *)data;
                if (!of_prop_cmp(update->dn->type, "cpu") &&
                    !of_prop_cmp(update->prop->name, "ibm,associativity")) {
                        u32 core_id;
                        of_property_read_u32(update->dn, "reg", &core_id);
                        stage_topology_update(core_id);
                        rc = NOTIFY_OK;
                }
                break;
        }

        return rc;
}

static struct notifier_block dt_update_nb = {
        .notifier_call = dt_update_callback,
};

#endif

/*
 * Start polling for associativity changes.
 */
int start_topology_update(void)
{
        int rc = 0;

        if (firmware_has_feature(FW_FEATURE_PRRN)) {
                if (!prrn_enabled) {
                        prrn_enabled = 1;
                        vphn_enabled = 0;
#ifdef CONFIG_SMP
                        rc = of_reconfig_notifier_register(&dt_update_nb);
#endif
                }
        } else if (firmware_has_feature(FW_FEATURE_VPHN) &&
                   get_lppaca()->shared_proc) {
                if (!vphn_enabled) {
                        prrn_enabled = 0;
                        vphn_enabled = 1;
                        setup_cpu_associativity_change_counters();
                        init_timer_deferrable(&topology_timer);
                        reset_topology_timer();
                }
        }

        return rc;
}

/*
 * Disable polling for associativity changes.
 */
int stop_topology_update(void)
{
        int rc = 0;

        if (prrn_enabled) {
                prrn_enabled = 0;
#ifdef CONFIG_SMP
                rc = of_reconfig_notifier_unregister(&dt_update_nb);
#endif
        } else if (vphn_enabled) {
                vphn_enabled = 0;
                rc = del_timer_sync(&topology_timer);
        }

        return rc;
}

int prrn_is_enabled(void)
{
        return prrn_enabled;
}

static int topology_read(struct seq_file *file, void *v)
{
        if (vphn_enabled || prrn_enabled)
                seq_puts(file, "on\n");
        else
                seq_puts(file, "off\n");

        return 0;
}

static int topology_open(struct inode *inode, struct file *file)
{
        return single_open(file, topology_read, NULL);
}

static ssize_t topology_write(struct file *file, const char __user *buf,
                              size_t count, loff_t *off)
{
        char kbuf[4]; /* "on" or "off" plus null. */
        int read_len;

        read_len = count < 3 ? count : 3;
        if (copy_from_user(kbuf, buf, read_len))
                return -EFAULT;

        kbuf[read_len] = '\0';

        if (!strncmp(kbuf, "on", 2))
                start_topology_update();
        else if (!strncmp(kbuf, "off", 3))
                stop_topology_update();
        else
                return -EINVAL;

        return count;
}

static const struct file_operations topology_ops = {
        .read = seq_read,
        .write = topology_write,
        .open = topology_open,
        .release = single_release
};
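
/*
 * Usage from userspace, given the proc file created below:
 *   cat /proc/powerpc/topology_updates   -> "on" or "off"
 *   echo on  > /proc/powerpc/topology_updates
 *   echo off > /proc/powerpc/topology_updates
 */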

static int topology_update_init(void)
{
        start_topology_update();
        proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops);

        return 0;
}
device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */