/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <jailhouse/entry.h>
#include <jailhouse/control.h>
#include <jailhouse/printk.h>
#include <jailhouse/paging.h>
#include <jailhouse/processor.h>
#include <jailhouse/string.h>
#include <jailhouse/utils.h>
#include <asm/bitops.h>
#include <asm/spinlock.h>

enum msg_type {MSG_REQUEST, MSG_INFORMATION};
enum failure_mode {ABORT_ON_ERROR, WARN_ON_ERROR};
enum management_task {CELL_START, CELL_SET_LOADABLE, CELL_DESTROY};

struct jailhouse_system *system_config;

static DEFINE_SPINLOCK(shutdown_lock);
static unsigned int num_cells = 1;

#define for_each_cell(c)        for ((c) = &root_cell; (c); (c) = (c)->next)
#define for_each_non_root_cell(c) \
        for ((c) = root_cell.next; (c); (c) = (c)->next)

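/**
 * next_cpu - Find the next CPU in a given CPU set
 * @cpu: CPU to start the search after
 * @cpu_set: CPU set to search in
 * @exception: CPU ID to skip; a value outside the set (e.g. -1) skips none
 *
 * Returns the ID of the next CPU in the set, or a value larger than
 * max_cpu_id if there is no further CPU.
 */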
unsigned int next_cpu(unsigned int cpu, struct cpu_set *cpu_set, int exception)
{
        do
                cpu++;
        while (cpu <= cpu_set->max_cpu_id &&
               (cpu == exception || !test_bit(cpu, cpu_set->bitmap)));
        return cpu;
}

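/**
 * cpu_id_valid - Check if a CPU ID belongs to the system
 * @cpu_id: CPU ID to check
 *
 * Returns true if the ID lies within the bounds of the root cell's CPU set
 * and the corresponding bit is set in it.
 */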
bool cpu_id_valid(unsigned long cpu_id)
{
        const unsigned long *system_cpu_set =
                jailhouse_cell_cpu_set(&system_config->root_cell);

        return (cpu_id < system_config->root_cell.cpu_set_size * 8 &&
                test_bit(cpu_id, system_cpu_set));
}

static void cell_suspend(struct cell *cell, struct per_cpu *cpu_data)
{
        unsigned int cpu;

        for_each_cpu_except(cpu, cell->cpu_set, cpu_data->cpu_id)
                arch_suspend_cpu(cpu);
}

static void cell_resume(struct per_cpu *cpu_data)
{
        unsigned int cpu;

        for_each_cpu_except(cpu, cpu_data->cell->cpu_set, cpu_data->cpu_id)
                arch_resume_cpu(cpu);
}

/**
 * cell_send_message - Deliver a message to a cell and wait for the reply
 * @cell: target cell
 * @message: message code to be sent (JAILHOUSE_MSG_*)
 * @type: message type, defines the valid replies
 *
 * Returns true if a request message was approved or reception of an
 * information message was acknowledged by the target cell. It also returns
 * true if the target cell does not support a communication region, is shut
 * down or in a failed state. Returns false on request denial or invalid
 * replies.
 */
static bool cell_send_message(struct cell *cell, u32 message,
                              enum msg_type type)
{
        if (cell->config->flags & JAILHOUSE_CELL_PASSIVE_COMMREG)
                return true;

        jailhouse_send_msg_to_cell(&cell->comm_page.comm_region, message);

        while (1) {
                u32 reply = cell->comm_page.comm_region.reply_from_cell;
                u32 cell_state = cell->comm_page.comm_region.cell_state;

                if (cell_state == JAILHOUSE_CELL_SHUT_DOWN ||
                    cell_state == JAILHOUSE_CELL_FAILED)
                        return true;

                if ((type == MSG_REQUEST &&
                     reply == JAILHOUSE_MSG_REQUEST_APPROVED) ||
                    (type == MSG_INFORMATION &&
                     reply == JAILHOUSE_MSG_RECEIVED))
                        return true;

                if (reply != JAILHOUSE_MSG_NONE)
                        return false;

                cpu_relax();
        }
}

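/**
 * cell_reconfig_ok - Check if all cells permit a reconfiguration
 * @excluded_cell: cell to ignore during the check, e.g. because it is about
 *                 to be destroyed
 *
 * Returns false if any other non-root cell reports the state
 * JAILHOUSE_CELL_RUNNING_LOCKED, true otherwise.
 */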
static bool cell_reconfig_ok(struct cell *excluded_cell)
{
        struct cell *cell;

        for_each_non_root_cell(cell)
                if (cell != excluded_cell &&
                    cell->comm_page.comm_region.cell_state ==
                                JAILHOUSE_CELL_RUNNING_LOCKED)
                        return false;
        return true;
}

static void cell_reconfig_completed(void)
{
        struct cell *cell;

        for_each_non_root_cell(cell)
                cell_send_message(cell, JAILHOUSE_MSG_RECONFIG_COMPLETED,
                                  MSG_INFORMATION);
}

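/* Returns the lowest cell ID not yet taken by any existing cell. */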
static unsigned int get_free_cell_id(void)
{
        unsigned int id = 0;
        struct cell *cell;

retry:
        for_each_cell(cell)
                if (cell->id == id) {
                        id++;
                        goto retry;
                }

        return id;
}

/**
 * cell_init - Initialize a new cell structure
 * @cell: cell to initialize; must be zero-initialized by the caller
 * @copy_cpu_set: true to copy the CPU set from the cell configuration
 *
 * Assigns a free cell ID and sets up the cell's CPU set, allocating it from
 * the memory pool if it does not fit into the embedded small set.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int cell_init(struct cell *cell, bool copy_cpu_set)
{
        const unsigned long *config_cpu_set =
                jailhouse_cell_cpu_set(cell->config);
        unsigned long cpu_set_size = cell->config->cpu_set_size;
        struct cpu_set *cpu_set;

        cell->id = get_free_cell_id();

        if (cpu_set_size > PAGE_SIZE)
                return -EINVAL;
        else if (cpu_set_size > sizeof(cell->small_cpu_set.bitmap)) {
                cpu_set = page_alloc(&mem_pool, 1);
                if (!cpu_set)
                        return -ENOMEM;
                cpu_set->max_cpu_id =
                        ((PAGE_SIZE - sizeof(unsigned long)) * 8) - 1;
        } else {
                cpu_set = &cell->small_cpu_set;
                cpu_set->max_cpu_id =
                        (sizeof(cell->small_cpu_set.bitmap) * 8) - 1;
        }
        cell->cpu_set = cpu_set;
        if (copy_cpu_set)
                memcpy(cell->cpu_set->bitmap, config_cpu_set, cpu_set_size);

        return 0;
}

static void destroy_cpu_set(struct cell *cell)
{
        if (cell->cpu_set != &cell->small_cpu_set)
                page_free(&mem_pool, cell->cpu_set, 1);
}

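/**
 * check_mem_regions - Validate the memory regions of a cell configuration
 * @config: cell configuration to check
 *
 * Returns 0 if all regions are page-aligned in address and size and carry
 * only valid flags, -EINVAL otherwise.
 */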
int check_mem_regions(const struct jailhouse_cell_desc *config)
{
        const struct jailhouse_memory *mem =
                jailhouse_cell_mem_regions(config);
        unsigned int n;

        for (n = 0; n < config->num_memory_regions; n++, mem++) {
                if (mem->phys_start & ~PAGE_MASK ||
                    mem->virt_start & ~PAGE_MASK ||
                    mem->size & ~PAGE_MASK ||
                    mem->flags & ~JAILHOUSE_MEM_VALID_FLAGS) {
                        printk("FATAL: Invalid memory region (%p, %p, %p, %x)\n",
                               mem->phys_start, mem->virt_start, mem->size,
                               mem->flags);
                        return -EINVAL;
                }
        }
        return 0;
}

static bool address_in_region(unsigned long addr,
                              const struct jailhouse_memory *region)
{
        return addr >= region->phys_start &&
               addr < (region->phys_start + region->size);
}

static int unmap_from_root_cell(const struct jailhouse_memory *mem)
{
        /*
         * arch_unmap_memory_region uses the virtual address of the memory
         * region. As only the root cell has a guaranteed 1:1 mapping, make a
         * copy where we ensure this.
         */
        struct jailhouse_memory tmp = *mem;

        tmp.virt_start = tmp.phys_start;
        return arch_unmap_memory_region(&root_cell, &tmp);
}

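/**
 * remap_to_root_cell - Map a memory region back into the root cell
 * @mem: memory region to remap
 * @mode: ABORT_ON_ERROR to stop at the first mapping failure,
 *        WARN_ON_ERROR to continue with a warning
 *
 * Only those parts of the region that overlap with memory regions of the
 * root cell configuration are mapped back, using the root cell's own
 * virtual addresses and access flags.
 *
 * Returns 0 on success, negative error code on mapping failures.
 */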
static int remap_to_root_cell(const struct jailhouse_memory *mem,
                              enum failure_mode mode)
{
        const struct jailhouse_memory *root_mem =
                jailhouse_cell_mem_regions(root_cell.config);
        struct jailhouse_memory overlap;
        unsigned int n;
        int err = 0;

        for (n = 0; n < root_cell.config->num_memory_regions;
             n++, root_mem++) {
                if (address_in_region(mem->phys_start, root_mem)) {
                        overlap.phys_start = mem->phys_start;
                        overlap.size = root_mem->size -
                                (overlap.phys_start - root_mem->phys_start);
                        if (overlap.size > mem->size)
                                overlap.size = mem->size;
                } else if (address_in_region(root_mem->phys_start, mem)) {
                        overlap.phys_start = root_mem->phys_start;
                        overlap.size = mem->size -
                                (overlap.phys_start - mem->phys_start);
                        if (overlap.size > root_mem->size)
                                overlap.size = root_mem->size;
                } else
                        continue;

                overlap.virt_start = root_mem->virt_start +
                        overlap.phys_start - root_mem->phys_start;
                overlap.flags = root_mem->flags;

                err = arch_map_memory_region(&root_cell, &overlap);
                if (err) {
                        if (mode == ABORT_ON_ERROR)
                                break;
                        printk("WARNING: Failed to re-assign memory region "
                               "to root cell\n");
                }
        }
        return err;
}

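/*
 * Tear down a cell: park its CPUs and return them to the root cell with
 * cleared statistics, unmap the cell's memory regions and hand the backing
 * memory (except the communication region) back to the root cell, then let
 * the architecture clean up and commit the new configuration.
 */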
static void cell_destroy_internal(struct per_cpu *cpu_data, struct cell *cell)
{
        const struct jailhouse_memory *mem =
                jailhouse_cell_mem_regions(cell->config);
        unsigned int cpu, n;

        for_each_cpu(cpu, cell->cpu_set) {
                arch_park_cpu(cpu);

                set_bit(cpu, root_cell.cpu_set->bitmap);
                per_cpu(cpu)->cell = &root_cell;
                per_cpu(cpu)->failed = false;
                memset(per_cpu(cpu)->stats, 0, sizeof(per_cpu(cpu)->stats));
        }

        for (n = 0; n < cell->config->num_memory_regions; n++, mem++) {
                /*
                 * This cannot fail. The region was mapped as a whole before,
                 * thus no hugepages need to be broken up to unmap it.
                 */
                arch_unmap_memory_region(cell, mem);
                if (!(mem->flags & JAILHOUSE_MEM_COMM_REGION))
                        remap_to_root_cell(mem, WARN_ON_ERROR);
        }

        arch_cell_destroy(cpu_data, cell);

        arch_config_commit(cpu_data, cell);
}

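/**
 * cell_create - Handle the JAILHOUSE_HC_CELL_CREATE hypercall
 * @cpu_data: per-CPU data of the calling CPU
 * @config_address: physical address of the new cell's configuration,
 *                  as provided by the root cell
 *
 * Suspends the root cell, maps and validates the configuration, allocates
 * and initializes the cell structure, then takes the assigned CPUs and
 * memory regions away from the root cell and hands them to the new cell.
 *
 * Returns the ID of the new cell on success, negative error code otherwise.
 */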
static int cell_create(struct per_cpu *cpu_data, unsigned long config_address)
{
        unsigned long mapping_addr = TEMPORARY_MAPPING_CPU_BASE(cpu_data);
        unsigned long cfg_page_offs = config_address & ~PAGE_MASK;
        unsigned long cfg_header_size, cfg_total_size;
        const struct jailhouse_memory *mem;
        struct jailhouse_cell_desc *cfg;
        unsigned int cell_pages, cpu, n;
        struct cell *cell, *last;
        int err;

        /* We do not support creation over non-root cells. */
        if (cpu_data->cell != &root_cell)
                return -EPERM;

        cell_suspend(&root_cell, cpu_data);

        if (!cell_reconfig_ok(NULL)) {
                err = -EPERM;
                goto err_resume;
        }

        cfg_header_size = (config_address & ~PAGE_MASK) +
                sizeof(struct jailhouse_cell_desc);

        err = page_map_create(&hv_paging_structs, config_address & PAGE_MASK,
                              cfg_header_size, mapping_addr,
                              PAGE_READONLY_FLAGS, PAGE_MAP_NON_COHERENT);
        if (err)
                goto err_resume;

        cfg = (struct jailhouse_cell_desc *)(mapping_addr + cfg_page_offs);
        cfg_total_size = jailhouse_cell_config_size(cfg);
        if (cfg_total_size + cfg_page_offs > NUM_TEMPORARY_PAGES * PAGE_SIZE) {
                err = -E2BIG;
                goto err_resume;
        }

        for_each_cell(cell)
                if (strcmp(cell->config->name, cfg->name) == 0) {
                        err = -EEXIST;
                        goto err_resume;
                }

        err = page_map_create(&hv_paging_structs, config_address & PAGE_MASK,
                              cfg_total_size + cfg_page_offs, mapping_addr,
                              PAGE_READONLY_FLAGS, PAGE_MAP_NON_COHERENT);
        if (err)
                goto err_resume;

        err = check_mem_regions(cfg);
        if (err)
                goto err_resume;

        cell_pages = PAGE_ALIGN(sizeof(*cell) + cfg_total_size) / PAGE_SIZE;
        cell = page_alloc(&mem_pool, cell_pages);
        if (!cell) {
                err = -ENOMEM;
                goto err_resume;
        }

        cell->data_pages = cell_pages;
        cell->config = ((void *)cell) + sizeof(*cell);
        memcpy(cell->config, cfg, cfg_total_size);

        err = cell_init(cell, true);
        if (err)
                goto err_free_cell;

        /* don't assign the CPU we are currently running on */
        if (cpu_data->cpu_id <= cell->cpu_set->max_cpu_id &&
            test_bit(cpu_data->cpu_id, cell->cpu_set->bitmap)) {
                err = -EBUSY;
                goto err_free_cpu_set;
        }

        /* the root cell's cpu set must be a superset of the new cell's set */
        if (root_cell.cpu_set->max_cpu_id < cell->cpu_set->max_cpu_id) {
                err = -EBUSY;
                goto err_free_cpu_set;
        }
        for_each_cpu(cpu, cell->cpu_set)
                if (!test_bit(cpu, root_cell.cpu_set->bitmap)) {
                        err = -EBUSY;
                        goto err_free_cpu_set;
                }

        err = arch_cell_create(cpu_data, cell);
        if (err)
                goto err_free_cpu_set;

        for_each_cpu(cpu, cell->cpu_set) {
                arch_park_cpu(cpu);

                clear_bit(cpu, root_cell.cpu_set->bitmap);
                per_cpu(cpu)->cell = cell;
                memset(per_cpu(cpu)->stats, 0, sizeof(per_cpu(cpu)->stats));
        }

        /*
         * Unmap the cell's memory regions from the root cell and map them to
         * the new cell instead.
         */
        mem = jailhouse_cell_mem_regions(cell->config);
        for (n = 0; n < cell->config->num_memory_regions; n++, mem++) {
                /*
                 * Unmap exceptions:
                 *  - the communication region is not backed by root memory
                 */
                if (!(mem->flags & JAILHOUSE_MEM_COMM_REGION)) {
                        err = unmap_from_root_cell(mem);
                        if (err)
                                goto err_destroy_cell;
                }

                err = arch_map_memory_region(cell, mem);
                if (err)
                        goto err_destroy_cell;
        }

        arch_config_commit(cpu_data, cell);

        cell->comm_page.comm_region.cell_state = JAILHOUSE_CELL_SHUT_DOWN;

        last = &root_cell;
        while (last->next)
                last = last->next;
        last->next = cell;
        num_cells++;

        cell_reconfig_completed();

        printk("Created cell \"%s\"\n", cell->config->name);

        page_map_dump_stats("after cell creation");

        cell_resume(cpu_data);

        return cell->id;

err_destroy_cell:
        cell_destroy_internal(cpu_data, cell);
err_free_cpu_set:
        destroy_cpu_set(cell);
err_free_cell:
        page_free(&mem_pool, cell, cell_pages);

err_resume:
        cell_resume(cpu_data);

        return err;
}

static bool cell_shutdown_ok(struct cell *cell)
{
        return cell_send_message(cell, JAILHOUSE_MSG_SHUTDOWN_REQUEST,
                                 MSG_REQUEST);
}

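/**
 * cell_management_prologue - Common entry checks for cell management tasks
 * @task: management task to be performed
 * @cpu_data: per-CPU data of the calling CPU
 * @id: ID of the target cell
 * @cell_ptr: returns the target cell on success
 *
 * Ensures the call comes from the root cell and suspends it, looks up the
 * target cell, checks that no other cell is locked (for CELL_DESTROY) and
 * that the target cell agrees to shut down, then suspends the target cell
 * as well.
 *
 * Returns 0 on success, negative error code otherwise.
 */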
static int cell_management_prologue(enum management_task task,
                                    struct per_cpu *cpu_data, unsigned long id,
                                    struct cell **cell_ptr)
{
        /* We do not support management commands over non-root cells. */
        if (cpu_data->cell != &root_cell)
                return -EPERM;

        cell_suspend(&root_cell, cpu_data);

        for_each_cell(*cell_ptr)
                if ((*cell_ptr)->id == id)
                        break;

        if (!*cell_ptr) {
                cell_resume(cpu_data);
                return -ENOENT;
        }

        /* root cell cannot be managed */
        if (*cell_ptr == &root_cell) {
                cell_resume(cpu_data);
                return -EINVAL;
        }

        if ((task == CELL_DESTROY && !cell_reconfig_ok(*cell_ptr)) ||
            !cell_shutdown_ok(*cell_ptr)) {
                cell_resume(cpu_data);
                return -EPERM;
        }

        cell_suspend(*cell_ptr, cpu_data);

        return 0;
}

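/**
 * cell_start - Handle the JAILHOUSE_HC_CELL_START hypercall
 * @cpu_data: per-CPU data of the calling CPU
 * @id: ID of the cell to start
 *
 * Unmaps any loadable memory regions from the root cell again, resets the
 * communication region state and all CPUs of the cell.
 *
 * Returns 0 on success, negative error code otherwise.
 */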
static int cell_start(struct per_cpu *cpu_data, unsigned long id)
{
        const struct jailhouse_memory *mem;
        unsigned int cpu, n;
        struct cell *cell;
        int err;

        err = cell_management_prologue(CELL_START, cpu_data, id, &cell);
        if (err)
                return err;

        if (cell->loadable) {
                /* unmap all loadable memory regions from the root cell */
                mem = jailhouse_cell_mem_regions(cell->config);
                for (n = 0; n < cell->config->num_memory_regions; n++, mem++)
                        if (mem->flags & JAILHOUSE_MEM_LOADABLE) {
                                err = unmap_from_root_cell(mem);
                                if (err)
                                        goto out_resume;
                        }

                arch_config_commit(cpu_data, NULL);

                cell->loadable = false;
        }

        /* present a consistent Communication Region state to the cell */
        cell->comm_page.comm_region.cell_state = JAILHOUSE_CELL_RUNNING;
        cell->comm_page.comm_region.msg_to_cell = JAILHOUSE_MSG_NONE;

        for_each_cpu(cpu, cell->cpu_set) {
                per_cpu(cpu)->failed = false;
                arch_reset_cpu(cpu);
        }

        printk("Started cell \"%s\"\n", cell->config->name);

out_resume:
        cell_resume(cpu_data);

        return err;
}

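/**
 * cell_set_loadable - Handle the JAILHOUSE_HC_CELL_SET_LOADABLE hypercall
 * @cpu_data: per-CPU data of the calling CPU
 * @id: ID of the target cell
 *
 * Parks all CPUs of the cell and maps its loadable memory regions into the
 * root cell so that a new cell image can be loaded.
 *
 * Returns 0 on success, negative error code otherwise.
 */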
static int cell_set_loadable(struct per_cpu *cpu_data, unsigned long id)
{
        const struct jailhouse_memory *mem;
        unsigned int cpu, n;
        struct cell *cell;
        int err;

        err = cell_management_prologue(CELL_SET_LOADABLE, cpu_data, id, &cell);
        if (err)
                return err;

        for_each_cpu(cpu, cell->cpu_set) {
                per_cpu(cpu)->failed = false;
                arch_park_cpu(cpu);
        }

        if (cell->loadable)
                goto out_resume;

        cell->comm_page.comm_region.cell_state = JAILHOUSE_CELL_SHUT_DOWN;
        cell->loadable = true;

        /* map all loadable memory regions into the root cell */
        mem = jailhouse_cell_mem_regions(cell->config);
        for (n = 0; n < cell->config->num_memory_regions; n++, mem++)
                if (mem->flags & JAILHOUSE_MEM_LOADABLE) {
                        err = remap_to_root_cell(mem, ABORT_ON_ERROR);
                        if (err)
                                goto out_resume;
                }

        arch_config_commit(cpu_data, NULL);

        printk("Cell \"%s\" can be loaded\n", cell->config->name);

out_resume:
        cell_resume(cpu_data);

        return err;
}

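/**
 * cell_destroy - Handle the JAILHOUSE_HC_CELL_DESTROY hypercall
 * @cpu_data: per-CPU data of the calling CPU
 * @id: ID of the cell to destroy
 *
 * Returns the cell's CPUs and memory to the root cell, unlinks the cell
 * from the cell list and releases its data pages.
 *
 * Returns 0 on success, negative error code otherwise.
 */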
static int cell_destroy(struct per_cpu *cpu_data, unsigned long id)
{
        struct cell *cell, *previous;
        int err;

        err = cell_management_prologue(CELL_DESTROY, cpu_data, id, &cell);
        if (err)
                return err;

        printk("Closing cell \"%s\"\n", cell->config->name);

        cell_destroy_internal(cpu_data, cell);

        previous = &root_cell;
        while (previous->next != cell)
                previous = previous->next;
        previous->next = cell->next;
        num_cells--;

        page_free(&mem_pool, cell, cell->data_pages);
        page_map_dump_stats("after cell destruction");

        cell_reconfig_completed();

        cell_resume(cpu_data);

        return 0;
}

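/**
 * cell_get_state - Handle the JAILHOUSE_HC_CELL_GET_STATE hypercall
 * @cpu_data: per-CPU data of the calling CPU
 * @id: ID of the cell to query
 *
 * Only permitted for the root cell.
 *
 * Returns the state reported in the cell's communication region
 * (JAILHOUSE_CELL_*), -EINVAL on an invalid state value, -ENOENT if the
 * cell does not exist or -EPERM when invoked from a non-root cell.
 */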
static int cell_get_state(struct per_cpu *cpu_data, unsigned long id)
{
        struct cell *cell;

        if (cpu_data->cell != &root_cell)
                return -EPERM;

        /*
         * We do not need explicit synchronization with cell_create/destroy
         * because their cell_suspend(root_cell) will not return before we
         * have left this hypercall.
         */
        for_each_cell(cell)
                if (cell->id == id) {
                        u32 state = cell->comm_page.comm_region.cell_state;

                        switch (state) {
                        case JAILHOUSE_CELL_RUNNING:
                        case JAILHOUSE_CELL_RUNNING_LOCKED:
                        case JAILHOUSE_CELL_SHUT_DOWN:
                        case JAILHOUSE_CELL_FAILED:
                                return state;
                        default:
                                return -EINVAL;
                        }
                }
        return -ENOENT;
}

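/**
 * shutdown - Handle the JAILHOUSE_HC_DISABLE hypercall
 * @cpu_data: per-CPU data of the calling CPU
 *
 * Only permitted for the root cell. The first root-cell CPU entering this
 * function asks all cells for shutdown permission and, if granted, shuts
 * down all non-root CPUs and the hypervisor itself. The result is recorded
 * for every root-cell CPU so that each of them returns consistently when
 * passing through here.
 *
 * Returns 0 on success, negative error code otherwise.
 */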
static int shutdown(struct per_cpu *cpu_data)
{
        unsigned int this_cpu = cpu_data->cpu_id;
        struct cell *cell;
        unsigned int cpu;
        int state, ret;

        /* We do not support shutdown over non-root cells. */
        if (cpu_data->cell != &root_cell)
                return -EPERM;

        spin_lock(&shutdown_lock);

        if (cpu_data->shutdown_state == SHUTDOWN_NONE) {
                state = SHUTDOWN_STARTED;
                for_each_non_root_cell(cell)
                        if (!cell_shutdown_ok(cell))
                                state = -EPERM;

                if (state == SHUTDOWN_STARTED) {
                        printk("Shutting down hypervisor\n");

                        for_each_non_root_cell(cell) {
                                cell_suspend(cell, cpu_data);

                                printk("Closing cell \"%s\"\n",
                                       cell->config->name);

                                for_each_cpu(cpu, cell->cpu_set) {
                                        printk(" Releasing CPU %d\n", cpu);
                                        arch_shutdown_cpu(cpu);
                                }
                        }

                        printk("Closing root cell \"%s\"\n",
                               root_cell.config->name);
                        arch_shutdown();
                }

                for_each_cpu(cpu, root_cell.cpu_set)
                        per_cpu(cpu)->shutdown_state = state;
        }

        if (cpu_data->shutdown_state == SHUTDOWN_STARTED) {
                printk(" Releasing CPU %d\n", this_cpu);
                ret = 0;
        } else
                ret = cpu_data->shutdown_state;
        cpu_data->shutdown_state = SHUTDOWN_NONE;

        spin_unlock(&shutdown_lock);

        return ret;
}

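/*
 * Handle the JAILHOUSE_HC_HYPERVISOR_GET_INFO hypercall: report the size
 * and usage of the hypervisor's memory pools or the number of cells.
 */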
static long hypervisor_get_info(struct per_cpu *cpu_data, unsigned long type)
{
        switch (type) {
        case JAILHOUSE_INFO_MEM_POOL_SIZE:
                return mem_pool.pages;
        case JAILHOUSE_INFO_MEM_POOL_USED:
                return mem_pool.used_pages;
        case JAILHOUSE_INFO_REMAP_POOL_SIZE:
                return remap_pool.pages;
        case JAILHOUSE_INFO_REMAP_POOL_USED:
                return remap_pool.used_pages;
        case JAILHOUSE_INFO_NUM_CELLS:
                return num_cells;
        default:
                return -EINVAL;
        }
}

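/**
 * cpu_get_info - Handle the JAILHOUSE_HC_CPU_GET_INFO hypercall
 * @cpu_data: per-CPU data of the calling CPU
 * @cpu_id: ID of the CPU to query
 * @type: either the CPU state or one of the per-CPU statistics
 *
 * Non-root cells may only query CPUs assigned to them.
 *
 * Returns the requested value (statistics are masked to 31 bits so that
 * they cannot be mistaken for error codes), negative error code otherwise.
 */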
static int cpu_get_info(struct per_cpu *cpu_data, unsigned long cpu_id,
                        unsigned long type)
{
        if (!cpu_id_valid(cpu_id))
                return -EINVAL;

        /*
         * We do not need explicit synchronization with cell_destroy because
         * its cell_suspend(root_cell + this_cell) will not return before we
         * have left this hypercall.
         */
        if (cpu_data->cell != &root_cell &&
            (cpu_id > cpu_data->cell->cpu_set->max_cpu_id ||
             !test_bit(cpu_id, cpu_data->cell->cpu_set->bitmap)))
                return -EPERM;

        if (type == JAILHOUSE_CPU_INFO_STATE) {
                return per_cpu(cpu_id)->failed ? JAILHOUSE_CPU_FAILED :
                        JAILHOUSE_CPU_RUNNING;
        } else if (type >= JAILHOUSE_CPU_INFO_STAT_BASE &&
                type - JAILHOUSE_CPU_INFO_STAT_BASE < JAILHOUSE_NUM_CPU_STATS) {
                type -= JAILHOUSE_CPU_INFO_STAT_BASE;
                return per_cpu(cpu_id)->stats[type] & BIT_MASK(30, 0);
        } else
                return -EINVAL;
}

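/**
 * hypercall - Entry point for hypercalls issued by cells
 * @cpu_data: per-CPU data of the calling CPU
 * @code: hypercall code (JAILHOUSE_HC_*)
 * @arg1: first hypercall argument
 * @arg2: second hypercall argument
 *
 * Accounts the hypercall in the per-CPU statistics and dispatches it to
 * the matching handler.
 *
 * Returns the handler's result, or -ENOSYS on an unknown code.
 */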
long hypercall(struct per_cpu *cpu_data, unsigned long code,
               unsigned long arg1, unsigned long arg2)
{
        cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_HYPERCALL]++;

        switch (code) {
        case JAILHOUSE_HC_DISABLE:
                return shutdown(cpu_data);
        case JAILHOUSE_HC_CELL_CREATE:
                return cell_create(cpu_data, arg1);
        case JAILHOUSE_HC_CELL_START:
                return cell_start(cpu_data, arg1);
        case JAILHOUSE_HC_CELL_SET_LOADABLE:
                return cell_set_loadable(cpu_data, arg1);
        case JAILHOUSE_HC_CELL_DESTROY:
                return cell_destroy(cpu_data, arg1);
        case JAILHOUSE_HC_HYPERVISOR_GET_INFO:
                return hypervisor_get_info(cpu_data, arg1);
        case JAILHOUSE_HC_CELL_GET_STATE:
                return cell_get_state(cpu_data, arg1);
        case JAILHOUSE_HC_CPU_GET_INFO:
                return cpu_get_info(cpu_data, arg1, arg2);
        default:
                return -ENOSYS;
        }
}

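/**
 * panic_stop - Stop the calling CPU on fatal errors
 * @cpu_data: per-CPU data of the calling CPU, or NULL if unavailable
 *
 * Marks the CPU as stopped, releases an in-progress panic output if this
 * CPU owns it and hands control to the architecture-specific stop handler.
 */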
void panic_stop(struct per_cpu *cpu_data)
{
        panic_printk("Stopping CPU");
        if (cpu_data) {
                panic_printk(" %d", cpu_data->cpu_id);
                cpu_data->cpu_stopped = true;
        }
        panic_printk("\n");

        if (phys_processor_id() == panic_cpu)
                panic_in_progress = 0;

        arch_panic_stop(cpu_data);
}

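/**
 * panic_halt - Park the calling CPU on cell-level failures
 * @cpu_data: per-CPU data of the calling CPU
 *
 * Marks the CPU as failed and, once all CPUs of the cell have failed,
 * reports the whole cell as failed via its communication region. Also
 * releases an in-progress panic output if this CPU owns it.
 */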
void panic_halt(struct per_cpu *cpu_data)
{
        struct cell *cell = cpu_data->cell;
        bool cell_failed = true;
        unsigned int cpu;

        panic_printk("Parking CPU %d\n", cpu_data->cpu_id);

        cpu_data->failed = true;
        for_each_cpu(cpu, cell->cpu_set)
                if (!per_cpu(cpu)->failed) {
                        cell_failed = false;
                        break;
                }
        if (cell_failed)
                cell->comm_page.comm_region.cell_state = JAILHOUSE_CELL_FAILED;

        arch_panic_halt(cpu_data);

        if (phys_processor_id() == panic_cpu)
                panic_in_progress = 0;
}