/*
 * Architecture specific (PPC64) functions for kexec based crash dumps.
 *
 * Copyright (C) 2005, IBM Corp.
 *
 * Created by: Haren Myneni
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 *
 */
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/export.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/types.h>

#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/kexec.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/setjmp.h>
33 /* This keeps a track of which one is the crashing cpu. */
34 int crashing_cpu = -1;
35 static cpumask_t cpus_in_crash = CPU_MASK_NONE;
37 #define CRASH_HANDLER_MAX 3
38 /* NULL terminated list of shutdown handles */
39 static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX+1];
40 static DEFINE_SPINLOCK(crash_handlers_lock);
42 static unsigned long crash_shutdown_buf[JMP_BUF_LEN];
43 static int crash_shutdown_cpu = -1;
45 static int handle_fault(struct pt_regs *regs)
47 if (crash_shutdown_cpu == smp_processor_id())
48 longjmp(crash_shutdown_buf, 1);
54 void crash_ipi_callback(struct pt_regs *regs)
56 int cpu = smp_processor_id();
62 if (!cpumask_test_cpu(cpu, &cpus_in_crash))
63 crash_save_cpu(regs, cpu);
64 cpumask_set_cpu(cpu, &cpus_in_crash);
67 * Starting the kdump boot.
68 * This barrier is needed to make sure that all CPUs are stopped.
70 while (!cpumask_test_cpu(crashing_cpu, &cpus_in_crash))
73 if (ppc_md.kexec_cpu_down)
74 ppc_md.kexec_cpu_down(1, 1);
85 static void crash_kexec_prepare_cpus(int cpu)
88 unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
90 int (*old_handler)(struct pt_regs *regs);
92 printk(KERN_EMERG "Sending IPI to other CPUs\n");
94 crash_send_ipi(crash_ipi_callback);
99 * FIXME: Until we will have the way to stop other CPUs reliably,
100 * the crash CPU will send an IPI and wait for other CPUs to
102 * Delay of at least 10 seconds.
105 while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {
110 /* Would it be better to replace the trap vector here? */
112 if (cpumask_weight(&cpus_in_crash) >= ncpus) {
113 printk(KERN_EMERG "IPI complete\n");
117 printk(KERN_EMERG "ERROR: %d cpu(s) not responding\n",
118 ncpus - cpumask_weight(&cpus_in_crash));
121 * If we have a panic timeout set then we can't wait indefinitely
122 * for someone to activate system reset. We also give up on the
123 * second time through if system reset fail to work.
125 if ((panic_timeout > 0) || (tries > 0))
129 * A system reset will cause all CPUs to take an 0x100 exception.
130 * The primary CPU returns here via setjmp, and the secondary
131 * CPUs reexecute the crash_kexec_secondary path.
133 old_handler = __debugger;
134 __debugger = handle_fault;
135 crash_shutdown_cpu = smp_processor_id();
137 if (setjmp(crash_shutdown_buf) == 0) {
138 printk(KERN_EMERG "Activate system reset (dumprestart) "
139 "to stop other cpu(s)\n");
142 * A system reset will force all CPUs to execute the
143 * crash code again. We need to reset cpus_in_crash so we
144 * wait for everyone to do this.
146 cpus_in_crash = CPU_MASK_NONE;
149 while (cpumask_weight(&cpus_in_crash) < ncpus)
153 crash_shutdown_cpu = -1;
154 __debugger = old_handler;
161 * This function will be called by secondary cpus.
163 void crash_kexec_secondary(struct pt_regs *regs)
168 local_irq_save(flags);
170 /* Wait 500ms for the primary crash CPU to signal its progress */
171 while (crashing_cpu < 0) {
173 /* No response, kdump image may not have been loaded */
174 local_irq_restore(flags);
182 crash_ipi_callback(regs);
185 #else /* ! CONFIG_SMP */
187 static void crash_kexec_prepare_cpus(int cpu)
190 * move the secondaries to us so that we can copy
191 * the new kernel 0-0x100 safely
193 * do this if kexec in setup.c ?
202 void crash_kexec_secondary(struct pt_regs *regs)
205 #endif /* CONFIG_SMP */
/* wait for all the CPUs to hit real mode but timeout if they don't come in */
#if defined(CONFIG_SMP) && defined(CONFIG_PPC_STD_MMU_64)
static void crash_kexec_wait_realmode(int cpu)
{
	unsigned int msecs;
	int i;

	/* Shared ~10s budget across all CPUs, decremented 1ms at a time. */
	msecs = 10000;
	for (i=0; i < nr_cpu_ids && msecs > 0; i++) {
		if (i == cpu)
			continue;

		while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) {
			barrier();
			/* Skip impossible/offline CPUs; bail when time is up. */
			if (!cpu_possible(i) || !cpu_online(i) || (msecs <= 0))
				break;
			msecs--;
			mdelay(1);
		}
	}
	mb();
}
#else
static inline void crash_kexec_wait_realmode(int cpu) {}
#endif	/* CONFIG_SMP && CONFIG_PPC_STD_MMU_64 */
234 * Register a function to be called on shutdown. Only use this if you
235 * can't reset your device in the second kernel.
237 int crash_shutdown_register(crash_shutdown_t handler)
241 spin_lock(&crash_handlers_lock);
242 for (i = 0 ; i < CRASH_HANDLER_MAX; i++)
243 if (!crash_shutdown_handles[i]) {
244 /* Insert handle at first empty entry */
245 crash_shutdown_handles[i] = handler;
250 if (i == CRASH_HANDLER_MAX) {
251 printk(KERN_ERR "Crash shutdown handles full, "
252 "not registered.\n");
256 spin_unlock(&crash_handlers_lock);
259 EXPORT_SYMBOL(crash_shutdown_register);
261 int crash_shutdown_unregister(crash_shutdown_t handler)
265 spin_lock(&crash_handlers_lock);
266 for (i = 0 ; i < CRASH_HANDLER_MAX; i++)
267 if (crash_shutdown_handles[i] == handler)
270 if (i == CRASH_HANDLER_MAX) {
271 printk(KERN_ERR "Crash shutdown handle not found\n");
274 /* Shift handles down */
275 for (; crash_shutdown_handles[i]; i++)
276 crash_shutdown_handles[i] =
277 crash_shutdown_handles[i+1];
281 spin_unlock(&crash_handlers_lock);
284 EXPORT_SYMBOL(crash_shutdown_unregister);
286 void default_machine_crash_shutdown(struct pt_regs *regs)
289 int (*old_handler)(struct pt_regs *regs);
292 * This function is only called after the system
293 * has panicked or is otherwise in a critical state.
294 * The minimum amount of code to allow a kexec'd kernel
295 * to run successfully needs to happen here.
297 * In practice this means stopping other cpus in
299 * The kernel is broken so disable interrupts.
304 * Make a note of crashing cpu. Will be used in machine_kexec
305 * such that another IPI will not be sent.
307 crashing_cpu = smp_processor_id();
308 crash_save_cpu(regs, crashing_cpu);
309 crash_kexec_prepare_cpus(crashing_cpu);
310 cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
311 crash_kexec_wait_realmode(crashing_cpu);
313 machine_kexec_mask_interrupts();
316 * Call registered shutdown routines safely. Swap out
317 * __debugger_fault_handler, and replace on exit.
319 old_handler = __debugger_fault_handler;
320 __debugger_fault_handler = handle_fault;
321 crash_shutdown_cpu = smp_processor_id();
322 for (i = 0; crash_shutdown_handles[i]; i++) {
323 if (setjmp(crash_shutdown_buf) == 0) {
325 * Insert syncs and delay to ensure
326 * instructions in the dangerous region don't
327 * leak away from this protected region.
329 asm volatile("sync; isync");
330 /* dangerous region */
331 crash_shutdown_handles[i]();
332 asm volatile("sync; isync");
335 crash_shutdown_cpu = -1;
336 __debugger_fault_handler = old_handler;
338 if (ppc_md.kexec_cpu_down)
339 ppc_md.kexec_cpu_down(1, 0);