1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/export.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/init.h>
22 #include <linux/kexec.h>
23 #include <linux/of_fdt.h>
24 #include <linux/root_dev.h>
25 #include <linux/cpu.h>
26 #include <linux/interrupt.h>
27 #include <linux/smp.h>
28 #include <linux/fs.h>
29 #include <linux/proc_fs.h>
30 #include <linux/memblock.h>
31 #include <linux/bug.h>
32 #include <linux/compiler.h>
33 #include <linux/sort.h>
34
35 #include <asm/unified.h>
36 #include <asm/cp15.h>
37 #include <asm/cpu.h>
38 #include <asm/cputype.h>
39 #include <asm/elf.h>
40 #include <asm/procinfo.h>
41 #include <asm/sections.h>
42 #include <asm/setup.h>
43 #include <asm/smp_plat.h>
44 #include <asm/mach-types.h>
45 #include <asm/cacheflush.h>
46 #include <asm/cachetype.h>
47 #include <asm/tlbflush.h>
48
49 #include <asm/prom.h>
50 #include <asm/mach/arch.h>
51 #include <asm/mach/irq.h>
52 #include <asm/mach/time.h>
53 #include <asm/system_info.h>
54 #include <asm/system_misc.h>
55 #include <asm/traps.h>
56 #include <asm/unwind.h>
57 #include <asm/memblock.h>
58
59 #if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
60 #include "compat.h"
61 #endif
62 #include "atags.h"
63 #include "tcm.h"
64
65 #ifndef MEM_SIZE
66 #define MEM_SIZE        (16*1024*1024)
67 #endif
68
69 #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
70 char fpe_type[8];
71
72 static int __init fpe_setup(char *line)
73 {
74         memcpy(fpe_type, line, 8);
75         return 1;
76 }
77
78 __setup("fpe=", fpe_setup);
79 #endif
80
81 extern void paging_init(struct machine_desc *desc);
82 extern void sanity_check_meminfo(void);
83 extern void reboot_setup(char *str);
84 extern void setup_dma_zone(struct machine_desc *desc);
85
86 unsigned int processor_id;
87 EXPORT_SYMBOL(processor_id);
88 unsigned int __machine_arch_type __read_mostly;
89 EXPORT_SYMBOL(__machine_arch_type);
90 unsigned int cacheid __read_mostly;
91 EXPORT_SYMBOL(cacheid);
92
93 unsigned int __atags_pointer __initdata;
94
95 unsigned int system_rev;
96 EXPORT_SYMBOL(system_rev);
97
98 unsigned int system_serial_low;
99 EXPORT_SYMBOL(system_serial_low);
100
101 unsigned int system_serial_high;
102 EXPORT_SYMBOL(system_serial_high);
103
104 unsigned int elf_hwcap __read_mostly;
105 EXPORT_SYMBOL(elf_hwcap);
106
107
108 #ifdef MULTI_CPU
109 struct processor processor __read_mostly;
110 #endif
111 #ifdef MULTI_TLB
112 struct cpu_tlb_fns cpu_tlb __read_mostly;
113 #endif
114 #ifdef MULTI_USER
115 struct cpu_user_fns cpu_user __read_mostly;
116 #endif
117 #ifdef MULTI_CACHE
118 struct cpu_cache_fns cpu_cache __read_mostly;
119 #endif
120 #ifdef CONFIG_OUTER_CACHE
121 struct outer_cache_fns outer_cache __read_mostly;
122 EXPORT_SYMBOL(outer_cache);
123 #endif
124
125 /*
126  * Cached cpu_architecture() result for use by assembler code.
127  * C code should use the cpu_architecture() function instead of accessing this
128  * variable directly.
129  */
130 int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
131
132 struct stack {
133         u32 irq[3];
134         u32 abt[3];
135         u32 und[3];
136 } ____cacheline_aligned;
137
138 static struct stack stacks[NR_CPUS];
139
140 char elf_platform[ELF_PLATFORM_SIZE];
141 EXPORT_SYMBOL(elf_platform);
142
143 static const char *cpu_name;
144 static const char *machine_name;
145 static char __initdata cmd_line[COMMAND_LINE_SIZE];
146 struct machine_desc *machine_desc __initdata;
147
148 static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
149 static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
150 #define ENDIANNESS ((char)endian_test.l)
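/*
 * The endian_test union above stores the bytes 'l','?','?','b' and
 * ENDIANNESS reads the value back as a long truncated to its least
 * significant byte: that byte is the first byte in memory ('l') on a
 * little-endian CPU and the last byte ('b') on a big-endian one.  The
 * character is appended to the machine and ELF platform strings in
 * setup_processor(), giving e.g. "armv7l" on a little-endian ARMv7
 * system.
 */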
151
152 DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
153
154 /*
155  * Standard memory resources
156  */
157 static struct resource mem_res[] = {
158         {
159                 .name = "Video RAM",
160                 .start = 0,
161                 .end = 0,
162                 .flags = IORESOURCE_MEM
163         },
164         {
165                 .name = "Kernel code",
166                 .start = 0,
167                 .end = 0,
168                 .flags = IORESOURCE_MEM
169         },
170         {
171                 .name = "Kernel data",
172                 .start = 0,
173                 .end = 0,
174                 .flags = IORESOURCE_MEM
175         }
176 };
177
178 #define video_ram   mem_res[0]
179 #define kernel_code mem_res[1]
180 #define kernel_data mem_res[2]
181
182 static struct resource io_res[] = {
183         {
184                 .name = "reserved",
185                 .start = 0x3bc,
186                 .end = 0x3be,
187                 .flags = IORESOURCE_IO | IORESOURCE_BUSY
188         },
189         {
190                 .name = "reserved",
191                 .start = 0x378,
192                 .end = 0x37f,
193                 .flags = IORESOURCE_IO | IORESOURCE_BUSY
194         },
195         {
196                 .name = "reserved",
197                 .start = 0x278,
198                 .end = 0x27f,
199                 .flags = IORESOURCE_IO | IORESOURCE_BUSY
200         }
201 };
202
203 #define lp0 io_res[0]
204 #define lp1 io_res[1]
205 #define lp2 io_res[2]
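/*
 * 0x3bc-0x3be, 0x378-0x37f and 0x278-0x27f are the legacy PC parallel
 * port (LPT) I/O ranges.  They are only claimed in
 * request_standard_resources() when the machine_desc sets the matching
 * reserve_lp0/reserve_lp1/reserve_lp2 flag.
 */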
206
207 static const char *proc_arch[] = {
208         "undefined/unknown",
209         "3",
210         "4",
211         "4T",
212         "5",
213         "5T",
214         "5TE",
215         "5TEJ",
216         "6TEJ",
217         "7",
218         "?(11)",
219         "?(12)",
220         "?(13)",
221         "?(14)",
222         "?(15)",
223         "?(16)",
224         "?(17)",
225 };
226
227 static int __get_cpu_architecture(void)
228 {
229         int cpu_arch;
230
231         if ((read_cpuid_id() & 0x0008f000) == 0) {
232                 cpu_arch = CPU_ARCH_UNKNOWN;
233         } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
234                 cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
235         } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
236                 cpu_arch = (read_cpuid_id() >> 16) & 7;
237                 if (cpu_arch)
238                         cpu_arch += CPU_ARCH_ARMv3;
239         } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
240                 unsigned int mmfr0;
241
242                 /* Revised CPUID format. Read the Memory Model Feature
243                  * Register 0 and check for VMSAv7 or PMSAv7 */
244                 asm("mrc        p15, 0, %0, c0, c1, 4"
245                     : "=r" (mmfr0));
246                 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
247                     (mmfr0 & 0x000000f0) >= 0x00000030)
248                         cpu_arch = CPU_ARCH_ARMv7;
249                 else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
250                          (mmfr0 & 0x000000f0) == 0x00000020)
251                         cpu_arch = CPU_ARCH_ARMv6;
252                 else
253                         cpu_arch = CPU_ARCH_UNKNOWN;
254         } else
255                 cpu_arch = CPU_ARCH_UNKNOWN;
256
257         return cpu_arch;
258 }
259
260 int __pure cpu_architecture(void)
261 {
262         BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
263
264         return __cpu_architecture;
265 }
266
267 static int cpu_has_aliasing_icache(unsigned int arch)
268 {
269         int aliasing_icache;
270         unsigned int id_reg, num_sets, line_size;
271
272         /* PIPT caches never alias. */
273         if (icache_is_pipt())
274                 return 0;
275
276         /* arch specifies the register format */
277         switch (arch) {
278         case CPU_ARCH_ARMv7:
279                 asm("mcr        p15, 2, %0, c0, c0, 0 @ set CSSELR"
280                     : /* No output operands */
281                     : "r" (1));
282                 isb();
283                 asm("mrc        p15, 1, %0, c0, c0, 0 @ read CCSIDR"
284                     : "=r" (id_reg));
285                 line_size = 4 << ((id_reg & 0x7) + 2);
286                 num_sets = ((id_reg >> 13) & 0x7fff) + 1;
287                 aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
288                 break;
289         case CPU_ARCH_ARMv6:
290                 aliasing_icache = read_cpuid_cachetype() & (1 << 11);
291                 break;
292         default:
293                 /* I-cache aliases will be handled by D-cache aliasing code */
294                 aliasing_icache = 0;
295         }
296
297         return aliasing_icache;
298 }
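/*
 * For ARMv7 the code above selects the L1 instruction cache via CSSELR,
 * reads its geometry back from CCSIDR and computes the size of one way
 * as line_size * num_sets.  A VIPT I-cache can only alias when that way
 * size exceeds PAGE_SIZE, because only then do the set-index bits reach
 * above the page offset and two virtual aliases of the same physical
 * page can land in different cache lines.
 */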
299
300 static void __init cacheid_init(void)
301 {
302         unsigned int cachetype = read_cpuid_cachetype();
303         unsigned int arch = cpu_architecture();
304
305         if (arch >= CPU_ARCH_ARMv6) {
306                 if ((cachetype & (7 << 29)) == 4 << 29) {
307                         /* ARMv7 register format */
308                         arch = CPU_ARCH_ARMv7;
309                         cacheid = CACHEID_VIPT_NONALIASING;
310                         switch (cachetype & (3 << 14)) {
311                         case (1 << 14):
312                                 cacheid |= CACHEID_ASID_TAGGED;
313                                 break;
314                         case (3 << 14):
315                                 cacheid |= CACHEID_PIPT;
316                                 break;
317                         }
318                 } else {
319                         arch = CPU_ARCH_ARMv6;
320                         if (cachetype & (1 << 23))
321                                 cacheid = CACHEID_VIPT_ALIASING;
322                         else
323                                 cacheid = CACHEID_VIPT_NONALIASING;
324                 }
325                 if (cpu_has_aliasing_icache(arch))
326                         cacheid |= CACHEID_VIPT_I_ALIASING;
327         } else {
328                 cacheid = CACHEID_VIVT;
329         }
330
331         printk("CPU: %s data cache, %s instruction cache\n",
332                 cache_is_vivt() ? "VIVT" :
333                 cache_is_vipt_aliasing() ? "VIPT aliasing" :
334                 cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
335                 cache_is_vivt() ? "VIVT" :
336                 icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
337                 icache_is_vipt_aliasing() ? "VIPT aliasing" :
338                 icache_is_pipt() ? "PIPT" :
339                 cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
340 }
341
342 /*
343  * These functions re-use the assembly code in head.S, which
344  * already provides the required functionality.
345  */
346 extern struct proc_info_list *lookup_processor_type(unsigned int);
347
348 void __init early_print(const char *str, ...)
349 {
350         extern void printascii(const char *);
351         char buf[256];
352         va_list ap;
353
354         va_start(ap, str);
355         vsnprintf(buf, sizeof(buf), str, ap);
356         va_end(ap);
357
358 #ifdef CONFIG_DEBUG_LL
359         printascii(buf);
360 #endif
361         printk("%s", buf);
362 }
363
364 static void __init feat_v6_fixup(void)
365 {
366         int id = read_cpuid_id();
367
368         if ((id & 0xff0f0000) != 0x41070000)
369                 return;
370
371         /*
372          * HWCAP_TLS is available only on 1136 r1p0 and later,
373          * see also kuser_get_tls_init.
374          */
375         if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
376                 elf_hwcap &= ~HWCAP_TLS;
377 }
378
379 /*
380  * cpu_init - initialise one CPU.
381  *
382  * cpu_init sets up the per-CPU stacks.
383  */
384 void cpu_init(void)
385 {
386         unsigned int cpu = smp_processor_id();
387         struct stack *stk = &stacks[cpu];
388
389         if (cpu >= NR_CPUS) {
390                 printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
391                 BUG();
392         }
393
394         cpu_proc_init();
395
396         /*
397          * Define the placement constraint for the inline asm directive below.
398          * In Thumb-2, msr with an immediate value is not allowed.
399          */
400 #ifdef CONFIG_THUMB2_KERNEL
401 #define PLC     "r"
402 #else
403 #define PLC     "I"
404 #endif
405
406         /*
407          * setup stacks for re-entrant exception handlers
408          */
409         __asm__ (
410         "msr    cpsr_c, %1\n\t"
411         "add    r14, %0, %2\n\t"
412         "mov    sp, r14\n\t"
413         "msr    cpsr_c, %3\n\t"
414         "add    r14, %0, %4\n\t"
415         "mov    sp, r14\n\t"
416         "msr    cpsr_c, %5\n\t"
417         "add    r14, %0, %6\n\t"
418         "mov    sp, r14\n\t"
419         "msr    cpsr_c, %7"
420             :
421             : "r" (stk),
422               PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
423               "I" (offsetof(struct stack, irq[0])),
424               PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
425               "I" (offsetof(struct stack, abt[0])),
426               PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
427               "I" (offsetof(struct stack, und[0])),
428               PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
429             : "r14");
430 }
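/*
 * The inline asm above switches the CPU through IRQ, ABT and UND mode in
 * turn (with IRQs and FIQs masked), points each mode's banked stack
 * pointer at the corresponding three-word area in this CPU's entry in
 * stacks[], and finally drops back into SVC mode.  The exception entry
 * code only needs these few words to stash state before moving over to
 * the SVC-mode kernel stack.
 */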
431
432 int __cpu_logical_map[NR_CPUS];
433
434 void __init smp_setup_processor_id(void)
435 {
436         int i;
437         u32 cpu = is_smp() ? read_cpuid_mpidr() & 0xff : 0;
438
439         cpu_logical_map(0) = cpu;
440         for (i = 1; i < NR_CPUS; ++i)
441                 cpu_logical_map(i) = i == cpu ? 0 : i;
442
443         printk(KERN_INFO "Booting Linux on physical CPU %d\n", cpu);
444 }
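/*
 * The loop above puts the booting CPU's hardware ID into logical slot 0
 * and moves whichever CPU would otherwise have owned that slot into the
 * booting CPU's old position, so logical CPU 0 is always the CPU we are
 * currently running on, whatever its MPIDR value.
 */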
445
446 static void __init setup_processor(void)
447 {
448         struct proc_info_list *list;
449
450         /*
451          * locate processor in the list of supported processor
452          * types.  The linker builds this table for us from the
453          * entries in arch/arm/mm/proc-*.S
454          */
455         list = lookup_processor_type(read_cpuid_id());
456         if (!list) {
457                 printk("CPU configuration botched (ID %08x), unable "
458                        "to continue.\n", read_cpuid_id());
459                 while (1);
460         }
461
462         cpu_name = list->cpu_name;
463         __cpu_architecture = __get_cpu_architecture();
464
465 #ifdef MULTI_CPU
466         processor = *list->proc;
467 #endif
468 #ifdef MULTI_TLB
469         cpu_tlb = *list->tlb;
470 #endif
471 #ifdef MULTI_USER
472         cpu_user = *list->user;
473 #endif
474 #ifdef MULTI_CACHE
475         cpu_cache = *list->cache;
476 #endif
477
478         printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
479                cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
480                proc_arch[cpu_architecture()], cr_alignment);
481
482         snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
483                  list->arch_name, ENDIANNESS);
484         snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
485                  list->elf_name, ENDIANNESS);
486         elf_hwcap = list->elf_hwcap;
487 #ifndef CONFIG_ARM_THUMB
488         elf_hwcap &= ~HWCAP_THUMB;
489 #endif
490
491         feat_v6_fixup();
492
493         cacheid_init();
494         cpu_init();
495 }
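/*
 * On MULTI_CPU/MULTI_TLB/MULTI_USER/MULTI_CACHE kernels the assignments
 * above copy the per-CPU-type operation tables out of the matched
 * proc_info_list entry (built by the linker from arch/arm/mm/proc-*.S),
 * so processor, TLB, user-copy and cache maintenance calls are
 * dispatched through function pointers.  Kernels built for a single
 * processor type bind those calls directly at link time and skip the
 * copies.
 */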
496
497 void __init dump_machine_table(void)
498 {
499         struct machine_desc *p;
500
501         early_print("Available machine support:\n\nID (hex)\tNAME\n");
502         for_each_machine_desc(p)
503                 early_print("%08x\t%s\n", p->nr, p->name);
504
505         early_print("\nPlease check your kernel config and/or bootloader.\n");
506
507         while (true)
508                 /* can't use cpu_relax() here as it may require MMU setup */;
509 }
510
511 int __init arm_add_memory(phys_addr_t start, unsigned long size)
512 {
513         struct membank *bank = &meminfo.bank[meminfo.nr_banks];
514
515         if (meminfo.nr_banks >= NR_BANKS) {
516                 printk(KERN_CRIT "NR_BANKS too low, "
517                         "ignoring memory at 0x%08llx\n", (long long)start);
518                 return -EINVAL;
519         }
520
521         /*
522          * Ensure that start/size are aligned to a page boundary.
523          * Size is appropriately rounded down, start is rounded up.
524          */
525         size -= start & ~PAGE_MASK;
526         bank->start = PAGE_ALIGN(start);
527
528 #ifndef CONFIG_LPAE
529         if (bank->start + size < bank->start) {
530                 printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
531                         "32-bit physical address space\n", (long long)start);
532                 /*
533                  * To ensure bank->start + bank->size is representable in
534                  * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
535                  * This means we lose a page after masking.
536                  */
537                 size = ULONG_MAX - bank->start;
538         }
539 #endif
540
541         bank->size = size & PAGE_MASK;
542
543         /*
544          * Check whether this memory region still has a non-zero
545          * size after the alignment above.
546          */
547         if (bank->size == 0)
548                 return -EINVAL;
549
550         meminfo.nr_banks++;
551         return 0;
552 }
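/*
 * The alignment in arm_add_memory() works in three steps: the partial
 * page below PAGE_ALIGN(start) is subtracted from size, start is then
 * rounded up to the next page boundary, and size is finally rounded
 * down so the bank also ends on a page boundary.  For example (an
 * illustrative value, assuming 4K pages), start=0x60000400 with
 * size=0x100000 becomes a bank at 0x60001000 covering 0xff000 bytes.
 */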
553
554 /*
555  * Pick out the memory size.  We look for mem=size@start,
556  * where start and size are "size[KkMm]"
557  */
558 static int __init early_mem(char *p)
559 {
560         static int usermem __initdata = 0;
561         unsigned long size;
562         phys_addr_t start;
563         char *endp;
564
565         /*
566          * If the user specifies memory size, we
567          * blow away any automatically generated
568          * size.
569          */
570         if (usermem == 0) {
571                 usermem = 1;
572                 meminfo.nr_banks = 0;
573         }
574
575         start = PHYS_OFFSET;
576         size  = memparse(p, &endp);
577         if (*endp == '@')
578                 start = memparse(endp + 1, NULL);
579
580         arm_add_memory(start, size);
581
582         return 0;
583 }
584 early_param("mem", early_mem);
585
586 static void __init
587 setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
588 {
589 #ifdef CONFIG_BLK_DEV_RAM
590         extern int rd_size, rd_image_start, rd_prompt, rd_doload;
591
592         rd_image_start = image_start;
593         rd_prompt = prompt;
594         rd_doload = doload;
595
596         if (rd_sz)
597                 rd_size = rd_sz;
598 #endif
599 }
600
601 static void __init request_standard_resources(struct machine_desc *mdesc)
602 {
603         struct memblock_region *region;
604         struct resource *res;
605
606         kernel_code.start   = virt_to_phys(_text);
607         kernel_code.end     = virt_to_phys(_etext - 1);
608         kernel_data.start   = virt_to_phys(_sdata);
609         kernel_data.end     = virt_to_phys(_end - 1);
610
611         for_each_memblock(memory, region) {
612                 res = alloc_bootmem_low(sizeof(*res));
613                 res->name  = "System RAM";
614                 res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
615                 res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
616                 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
617
618                 request_resource(&iomem_resource, res);
619
620                 if (kernel_code.start >= res->start &&
621                     kernel_code.end <= res->end)
622                         request_resource(res, &kernel_code);
623                 if (kernel_data.start >= res->start &&
624                     kernel_data.end <= res->end)
625                         request_resource(res, &kernel_data);
626         }
627
628         if (mdesc->video_start) {
629                 video_ram.start = mdesc->video_start;
630                 video_ram.end   = mdesc->video_end;
631                 request_resource(&iomem_resource, &video_ram);
632         }
633
634         /*
635          * Some machines can never have the lp0, lp1 or lp2
636          * parallel port resources.
637          */
638         if (mdesc->reserve_lp0)
639                 request_resource(&ioport_resource, &lp0);
640         if (mdesc->reserve_lp1)
641                 request_resource(&ioport_resource, &lp1);
642         if (mdesc->reserve_lp2)
643                 request_resource(&ioport_resource, &lp2);
644 }
645
646 /*
647  *  Tag parsing.
648  *
649  * This is the new way of passing data to the kernel at boot time.  Rather
650  * than passing a fixed inflexible structure to the kernel, we pass a list
651  * of variable-sized tags to the kernel.  The first tag must be a ATAG_CORE
652  * tag for the list to be recognised (to distinguish the tagged list from
653  * a param_struct).  The list is terminated with a zero-length tag (this tag
654  * is not parsed in any way).
655  */
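/*
 * A minimal tag list handed over by a typical boot loader therefore
 * looks like this (sizes counted in 32-bit words, including the
 * two-word tag_header):
 *
 *      ATAG_CORE    size 5     flags, pagesize, rootdev
 *      ATAG_MEM     size 4     size and start of a memory bank
 *      ATAG_CMDLINE variable   kernel command line string
 *      ATAG_NONE    size 0     terminator
 *
 * parse_tag() below matches each entry's tag id against the __tagtable
 * section and calls the corresponding parse function.
 */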
656 static int __init parse_tag_core(const struct tag *tag)
657 {
658         if (tag->hdr.size > 2) {
659                 if ((tag->u.core.flags & 1) == 0)
660                         root_mountflags &= ~MS_RDONLY;
661                 ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
662         }
663         return 0;
664 }
665
666 __tagtable(ATAG_CORE, parse_tag_core);
667
668 static int __init parse_tag_mem32(const struct tag *tag)
669 {
670         return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
671 }
672
673 __tagtable(ATAG_MEM, parse_tag_mem32);
674
675 #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
676 struct screen_info screen_info = {
677  .orig_video_lines      = 30,
678  .orig_video_cols       = 80,
679  .orig_video_mode       = 0,
680  .orig_video_ega_bx     = 0,
681  .orig_video_isVGA      = 1,
682  .orig_video_points     = 8
683 };
684
685 static int __init parse_tag_videotext(const struct tag *tag)
686 {
687         screen_info.orig_x            = tag->u.videotext.x;
688         screen_info.orig_y            = tag->u.videotext.y;
689         screen_info.orig_video_page   = tag->u.videotext.video_page;
690         screen_info.orig_video_mode   = tag->u.videotext.video_mode;
691         screen_info.orig_video_cols   = tag->u.videotext.video_cols;
692         screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
693         screen_info.orig_video_lines  = tag->u.videotext.video_lines;
694         screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
695         screen_info.orig_video_points = tag->u.videotext.video_points;
696         return 0;
697 }
698
699 __tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
700 #endif
701
702 static int __init parse_tag_ramdisk(const struct tag *tag)
703 {
704         setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
705                       (tag->u.ramdisk.flags & 2) == 0,
706                       tag->u.ramdisk.start, tag->u.ramdisk.size);
707         return 0;
708 }
709
710 __tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
711
712 static int __init parse_tag_serialnr(const struct tag *tag)
713 {
714         system_serial_low = tag->u.serialnr.low;
715         system_serial_high = tag->u.serialnr.high;
716         return 0;
717 }
718
719 __tagtable(ATAG_SERIAL, parse_tag_serialnr);
720
721 static int __init parse_tag_revision(const struct tag *tag)
722 {
723         system_rev = tag->u.revision.rev;
724         return 0;
725 }
726
727 __tagtable(ATAG_REVISION, parse_tag_revision);
728
729 static int __init parse_tag_cmdline(const struct tag *tag)
730 {
731 #if defined(CONFIG_CMDLINE_EXTEND)
732         strlcat(default_command_line, " ", COMMAND_LINE_SIZE);
733         strlcat(default_command_line, tag->u.cmdline.cmdline,
734                 COMMAND_LINE_SIZE);
735 #elif defined(CONFIG_CMDLINE_FORCE)
736         pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
737 #else
738         strlcpy(default_command_line, tag->u.cmdline.cmdline,
739                 COMMAND_LINE_SIZE);
740 #endif
741         return 0;
742 }
743
744 __tagtable(ATAG_CMDLINE, parse_tag_cmdline);
745
746 /*
747  * Scan the tag table for this tag, and call its parse function.
748  * The tag table is built by the linker from all the __tagtable
749  * declarations.
750  */
751 static int __init parse_tag(const struct tag *tag)
752 {
753         extern struct tagtable __tagtable_begin, __tagtable_end;
754         struct tagtable *t;
755
756         for (t = &__tagtable_begin; t < &__tagtable_end; t++)
757                 if (tag->hdr.tag == t->tag) {
758                         t->parse(tag);
759                         break;
760                 }
761
762         return t < &__tagtable_end;
763 }
764
765 /*
766  * Parse all tags in the list, checking both the global and architecture
767  * specific tag tables.
768  */
769 static void __init parse_tags(const struct tag *t)
770 {
771         for (; t->hdr.size; t = tag_next(t))
772                 if (!parse_tag(t))
773                         printk(KERN_WARNING
774                                 "Ignoring unrecognised tag 0x%08x\n",
775                                 t->hdr.tag);
776 }
777
778 /*
779  * This holds our defaults.
780  */
781 static struct init_tags {
782         struct tag_header hdr1;
783         struct tag_core   core;
784         struct tag_header hdr2;
785         struct tag_mem32  mem;
786         struct tag_header hdr3;
787 } init_tags __initdata = {
788         { tag_size(tag_core), ATAG_CORE },
789         { 1, PAGE_SIZE, 0xff },
790         { tag_size(tag_mem32), ATAG_MEM },
791         { MEM_SIZE },
792         { 0, ATAG_NONE }
793 };
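/*
 * These defaults give setup_machine_tags() something to fall back on
 * when the boot loader provided neither a usable ATAG list nor a device
 * tree: a single ATAG_CORE plus one MEM_SIZE (16MB) memory bank whose
 * start address is filled in with PHYS_OFFSET at runtime, terminated by
 * ATAG_NONE.
 */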
794
795 static int __init customize_machine(void)
796 {
797         /* customizes platform devices, or adds new ones */
798         if (machine_desc->init_machine)
799                 machine_desc->init_machine();
800         return 0;
801 }
802 arch_initcall(customize_machine);
803
804 #ifdef CONFIG_KEXEC
805 static inline unsigned long long get_total_mem(void)
806 {
807         unsigned long total;
808
809         total = max_low_pfn - min_low_pfn;
810         return total << PAGE_SHIFT;
811 }
812
813 /**
814  * reserve_crashkernel() - reserves memory area for crash kernel
815  *
816  * This function reserves the memory area given in the "crashkernel=" kernel
817  * command line parameter.  The reserved memory is used by a dump capture
818  * kernel when the primary kernel crashes.
819  */
820 static void __init reserve_crashkernel(void)
821 {
822         unsigned long long crash_size, crash_base;
823         unsigned long long total_mem;
824         int ret;
825
826         total_mem = get_total_mem();
827         ret = parse_crashkernel(boot_command_line, total_mem,
828                                 &crash_size, &crash_base);
829         if (ret)
830                 return;
831
832         ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
833         if (ret < 0) {
834                 printk(KERN_WARNING "crashkernel reservation failed - "
835                        "memory is in use (0x%lx)\n", (unsigned long)crash_base);
836                 return;
837         }
838
839         printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
840                "for crashkernel (System RAM: %ldMB)\n",
841                (unsigned long)(crash_size >> 20),
842                (unsigned long)(crash_base >> 20),
843                (unsigned long)(total_mem >> 20));
844
845         crashk_res.start = crash_base;
846         crashk_res.end = crash_base + crash_size - 1;
847         insert_resource(&iomem_resource, &crashk_res);
848 }
849 #else
850 static inline void reserve_crashkernel(void) {}
851 #endif /* CONFIG_KEXEC */
852
853 static void __init squash_mem_tags(struct tag *tag)
854 {
855         for (; tag->hdr.size; tag = tag_next(tag))
856                 if (tag->hdr.tag == ATAG_MEM)
857                         tag->hdr.tag = ATAG_NONE;
858 }
859
860 static struct machine_desc * __init setup_machine_tags(unsigned int nr)
861 {
862         struct tag *tags = (struct tag *)&init_tags;
863         struct machine_desc *mdesc = NULL, *p;
864         char *from = default_command_line;
865
866         init_tags.mem.start = PHYS_OFFSET;
867
868         /*
869          * locate machine in the list of supported machines.
870          */
871         for_each_machine_desc(p)
872                 if (nr == p->nr) {
873                         printk("Machine: %s\n", p->name);
874                         mdesc = p;
875                         break;
876                 }
877
878         if (!mdesc) {
879                 early_print("\nError: unrecognized/unsupported machine ID"
880                         " (r1 = 0x%08x).\n\n", nr);
881                 dump_machine_table(); /* does not return */
882         }
883
884         if (__atags_pointer)
885                 tags = phys_to_virt(__atags_pointer);
886         else if (mdesc->atag_offset)
887                 tags = (void *)(PAGE_OFFSET + mdesc->atag_offset);
888
889 #if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
890         /*
891          * If we have the old style parameters, convert them to
892          * a tag list.
893          */
894         if (tags->hdr.tag != ATAG_CORE)
895                 convert_to_tag_list(tags);
896 #endif
897
898         if (tags->hdr.tag != ATAG_CORE) {
899 #if defined(CONFIG_OF)
900                 /*
901                  * If CONFIG_OF is set, then assume this is a reasonably
902                  * modern system that should pass boot parameters
903                  */
904                 early_print("Warning: Neither atags nor dtb found\n");
905 #endif
906                 tags = (struct tag *)&init_tags;
907         }
908
909         if (mdesc->fixup)
910                 mdesc->fixup(tags, &from, &meminfo);
911
912         if (tags->hdr.tag == ATAG_CORE) {
913                 if (meminfo.nr_banks != 0)
914                         squash_mem_tags(tags);
915                 save_atags(tags);
916                 parse_tags(tags);
917         }
918
919         /* parse_early_param needs a boot_command_line */
920         strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);
921
922         return mdesc;
923 }
924
925 static int __init meminfo_cmp(const void *_a, const void *_b)
926 {
927         const struct membank *a = _a, *b = _b;
928         long cmp = bank_pfn_start(a) - bank_pfn_start(b);
929         return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
930 }
931
932 void __init setup_arch(char **cmdline_p)
933 {
934         struct machine_desc *mdesc;
935
936         setup_processor();
937         mdesc = setup_machine_fdt(__atags_pointer);
938         if (!mdesc)
939                 mdesc = setup_machine_tags(machine_arch_type);
940         machine_desc = mdesc;
941         machine_name = mdesc->name;
942
943         setup_dma_zone(mdesc);
944
945         if (mdesc->restart_mode)
946                 reboot_setup(&mdesc->restart_mode);
947
948         init_mm.start_code = (unsigned long) _text;
949         init_mm.end_code   = (unsigned long) _etext;
950         init_mm.end_data   = (unsigned long) _edata;
951         init_mm.brk        = (unsigned long) _end;
952
953         /* populate cmd_line too for later use, preserving boot_command_line */
954         strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
955         *cmdline_p = cmd_line;
956
957         parse_early_param();
958
959         sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
960         sanity_check_meminfo();
961         arm_memblock_init(&meminfo, mdesc);
962
963         paging_init(mdesc);
964         request_standard_resources(mdesc);
965
966         if (mdesc->restart)
967                 arm_pm_restart = mdesc->restart;
968
969         unflatten_device_tree();
970
971 #ifdef CONFIG_SMP
972         if (is_smp())
973                 smp_init_cpus();
974 #endif
975         reserve_crashkernel();
976
977         tcm_init();
978
979 #ifdef CONFIG_MULTI_IRQ_HANDLER
980         handle_arch_irq = mdesc->handle_irq;
981 #endif
982
983 #ifdef CONFIG_VT
984 #if defined(CONFIG_VGA_CONSOLE)
985         conswitchp = &vga_con;
986 #elif defined(CONFIG_DUMMY_CONSOLE)
987         conswitchp = &dummy_con;
988 #endif
989 #endif
990
991         if (mdesc->init_early)
992                 mdesc->init_early();
993 }
994
995
996 static int __init topology_init(void)
997 {
998         int cpu;
999
1000         for_each_possible_cpu(cpu) {
1001                 struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
1002                 cpuinfo->cpu.hotpluggable = 1;
1003                 register_cpu(&cpuinfo->cpu, cpu);
1004         }
1005
1006         return 0;
1007 }
1008 subsys_initcall(topology_init);
1009
1010 #ifdef CONFIG_HAVE_PROC_CPU
1011 static int __init proc_cpu_init(void)
1012 {
1013         struct proc_dir_entry *res;
1014
1015         res = proc_mkdir("cpu", NULL);
1016         if (!res)
1017                 return -ENOMEM;
1018         return 0;
1019 }
1020 fs_initcall(proc_cpu_init);
1021 #endif
1022
1023 static const char *hwcap_str[] = {
1024         "swp",
1025         "half",
1026         "thumb",
1027         "26bit",
1028         "fastmult",
1029         "fpa",
1030         "vfp",
1031         "edsp",
1032         "java",
1033         "iwmmxt",
1034         "crunch",
1035         "thumbee",
1036         "neon",
1037         "vfpv3",
1038         "vfpv3d16",
1039         "tls",
1040         "vfpv4",
1041         "idiva",
1042         "idivt",
1043         NULL
1044 };
1045
1046 static int c_show(struct seq_file *m, void *v)
1047 {
1048         int i;
1049
1050         seq_printf(m, "Processor\t: %s rev %d (%s)\n",
1051                    cpu_name, read_cpuid_id() & 15, elf_platform);
1052
1053 #if defined(CONFIG_SMP)
1054         for_each_online_cpu(i) {
1055                 /*
1056                  * glibc reads /proc/cpuinfo to determine the number of
1057                  * online processors, looking for lines beginning with
1058                  * "processor".  Give glibc what it expects.
1059                  */
1060                 seq_printf(m, "processor\t: %d\n", i);
1061                 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
1062                            per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
1063                            (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
1064         }
1065 #else /* CONFIG_SMP */
1066         seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1067                    loops_per_jiffy / (500000/HZ),
1068                    (loops_per_jiffy / (5000/HZ)) % 100);
1069 #endif
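        /*
         * Both branches above print BogoMIPS, i.e. roughly
         * loops_per_jiffy * HZ / 500000: the first expression gives the
         * integer part and the second the two decimal digits, using the
         * delay-loop value calibrated at boot.
         */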
1070
1071         /* dump out the processor features */
1072         seq_puts(m, "Features\t: ");
1073
1074         for (i = 0; hwcap_str[i]; i++)
1075                 if (elf_hwcap & (1 << i))
1076                         seq_printf(m, "%s ", hwcap_str[i]);
1077
1078         seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
1079         seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);
1080
1081         if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
1082                 /* pre-ARM7 */
1083                 seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
1084         } else {
1085                 if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
1086                         /* ARM7 */
1087                         seq_printf(m, "CPU variant\t: 0x%02x\n",
1088                                    (read_cpuid_id() >> 16) & 127);
1089                 } else {
1090                         /* post-ARM7 */
1091                         seq_printf(m, "CPU variant\t: 0x%x\n",
1092                                    (read_cpuid_id() >> 20) & 15);
1093                 }
1094                 seq_printf(m, "CPU part\t: 0x%03x\n",
1095                            (read_cpuid_id() >> 4) & 0xfff);
1096         }
1097         seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
1098
1099         seq_puts(m, "\n");
1100
1101         seq_printf(m, "Hardware\t: %s\n", machine_name);
1102         seq_printf(m, "Revision\t: %04x\n", system_rev);
1103         seq_printf(m, "Serial\t\t: %08x%08x\n",
1104                    system_serial_high, system_serial_low);
1105
1106         return 0;
1107 }
1108
1109 static void *c_start(struct seq_file *m, loff_t *pos)
1110 {
1111         return *pos < 1 ? (void *)1 : NULL;
1112 }
1113
1114 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1115 {
1116         ++*pos;
1117         return NULL;
1118 }
1119
1120 static void c_stop(struct seq_file *m, void *v)
1121 {
1122 }
1123
1124 const struct seq_operations cpuinfo_op = {
1125         .start  = c_start,
1126         .next   = c_next,
1127         .stop   = c_stop,
1128         .show   = c_show
1129 };