/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <jailhouse/entry.h>
#include <jailhouse/control.h>
#include <jailhouse/printk.h>
#include <jailhouse/paging.h>
#include <jailhouse/processor.h>
#include <jailhouse/string.h>
#include <asm/bitops.h>
#include <asm/spinlock.h>

enum msg_type {MSG_REQUEST, MSG_INFORMATION};
enum failure_mode {ABORT_ON_ERROR, WARN_ON_ERROR};
enum management_task {CELL_START, CELL_SET_LOADABLE, CELL_DESTROY};

struct jailhouse_system *system_config;

static DEFINE_SPINLOCK(shutdown_lock);
static unsigned int num_cells = 1;

#define for_each_cell(c)        for ((c) = &root_cell; (c); (c) = (c)->next)
#define for_each_non_root_cell(c) \
        for ((c) = root_cell.next; (c); (c) = (c)->next)

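/*
 * Return the next CPU in @cpu_set after @cpu, skipping @exception.
 * The result is larger than cpu_set->max_cpu_id if no further CPU is set.
 */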
unsigned int next_cpu(unsigned int cpu, struct cpu_set *cpu_set, int exception)
{
        do
                cpu++;
        while (cpu <= cpu_set->max_cpu_id &&
               (cpu == exception || !test_bit(cpu, cpu_set->bitmap)));
        return cpu;
}

bool cpu_id_valid(unsigned long cpu_id)
{
        const unsigned long *system_cpu_set =
                jailhouse_cell_cpu_set(&system_config->root_cell);

        return (cpu_id < system_config->root_cell.cpu_set_size * 8 &&
                test_bit(cpu_id, system_cpu_set));
}

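/*
 * cell_suspend/cell_resume - suspend or resume all CPUs of a cell except the
 * CPU that invokes the operation (typically the one processing a hypercall).
 */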
static void cell_suspend(struct cell *cell, struct per_cpu *cpu_data)
{
        unsigned int cpu;

        for_each_cpu_except(cpu, cell->cpu_set, cpu_data->cpu_id)
                arch_suspend_cpu(cpu);
}

static void cell_resume(struct per_cpu *cpu_data)
{
        unsigned int cpu;

        for_each_cpu_except(cpu, cpu_data->cell->cpu_set, cpu_data->cpu_id)
                arch_resume_cpu(cpu);
}

/**
 * cell_send_message - Deliver a message to a cell and wait for the reply
 * @cell: target cell
 * @message: message code to be sent (JAILHOUSE_MSG_*)
 * @type: message type, defines the valid replies
 *
 * Returns true if a request message was approved or reception of an
 * information message was acknowledged by the target cell. It also returns
 * true if the target cell does not support a communication region, is shut
 * down or in a failed state. Returns false on request denial or invalid
 * replies.
 */
static bool cell_send_message(struct cell *cell, u32 message,
                              enum msg_type type)
{
        if (cell->config->flags & JAILHOUSE_CELL_PASSIVE_COMMREG)
                return true;

        jailhouse_send_msg_to_cell(&cell->comm_page.comm_region, message);

        while (1) {
                u32 reply = cell->comm_page.comm_region.reply_from_cell;
                u32 cell_state = cell->comm_page.comm_region.cell_state;

                if (cell_state == JAILHOUSE_CELL_SHUT_DOWN ||
                    cell_state == JAILHOUSE_CELL_FAILED)
                        return true;

                if ((type == MSG_REQUEST &&
                     reply == JAILHOUSE_MSG_REQUEST_APPROVED) ||
                    (type == MSG_INFORMATION &&
                     reply == JAILHOUSE_MSG_RECEIVED))
                        return true;

                if (reply != JAILHOUSE_MSG_NONE)
                        return false;

                cpu_relax();
        }
}

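/*
 * Check whether a reconfiguration (cell creation or destruction) is allowed:
 * it must be rejected while any other non-root cell is in the
 * JAILHOUSE_CELL_RUNNING_LOCKED state.
 */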
static bool cell_reconfig_ok(struct cell *excluded_cell)
{
        struct cell *cell;

        for_each_non_root_cell(cell)
                if (cell != excluded_cell &&
                    cell->comm_page.comm_region.cell_state ==
                                JAILHOUSE_CELL_RUNNING_LOCKED)
                        return false;
        return true;
}

static void cell_reconfig_completed(void)
{
        struct cell *cell;

        for_each_non_root_cell(cell)
                cell_send_message(cell, JAILHOUSE_MSG_RECONFIG_COMPLETED,
                                  MSG_INFORMATION);
}

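/*
 * Return the lowest cell ID that is not yet used by any existing cell.
 */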
static unsigned int get_free_cell_id(void)
{
        unsigned int id = 0;
        struct cell *cell;

retry:
        for_each_cell(cell)
                if (cell->id == id) {
                        id++;
                        goto retry;
                }

        return id;
}

/* cell must be zero-initialized */
int cell_init(struct cell *cell, bool copy_cpu_set)
{
        const unsigned long *config_cpu_set =
                jailhouse_cell_cpu_set(cell->config);
        unsigned long cpu_set_size = cell->config->cpu_set_size;
        struct cpu_set *cpu_set;

        cell->id = get_free_cell_id();

        if (cpu_set_size > PAGE_SIZE)
                return -EINVAL;
        else if (cpu_set_size > sizeof(cell->small_cpu_set.bitmap)) {
                cpu_set = page_alloc(&mem_pool, 1);
                if (!cpu_set)
                        return -ENOMEM;
                cpu_set->max_cpu_id =
                        ((PAGE_SIZE - sizeof(unsigned long)) * 8) - 1;
        } else {
                cpu_set = &cell->small_cpu_set;
                cpu_set->max_cpu_id =
                        (sizeof(cell->small_cpu_set.bitmap) * 8) - 1;
        }
        cell->cpu_set = cpu_set;
        if (copy_cpu_set)
                memcpy(cell->cpu_set->bitmap, config_cpu_set, cpu_set_size);

        return 0;
}

static void destroy_cpu_set(struct cell *cell)
{
        if (cell->cpu_set != &cell->small_cpu_set)
                page_free(&mem_pool, cell->cpu_set, 1);
}

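/*
 * Validate all memory regions of a cell configuration: start addresses and
 * sizes must be page-aligned, and only known region flags may be set.
 */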
int check_mem_regions(const struct jailhouse_cell_desc *config)
{
        const struct jailhouse_memory *mem =
                jailhouse_cell_mem_regions(config);
        unsigned int n;

        for (n = 0; n < config->num_memory_regions; n++, mem++) {
                if (mem->phys_start & ~PAGE_MASK ||
                    mem->virt_start & ~PAGE_MASK ||
                    mem->size & ~PAGE_MASK ||
                    mem->flags & ~JAILHOUSE_MEM_VALID_FLAGS) {
                        printk("FATAL: Invalid memory bar (%p, %p, %p, %x)\n",
                               mem->phys_start, mem->virt_start, mem->size,
                               mem->flags);
                        return -EINVAL;
                }
        }
        return 0;
}

static bool address_in_region(unsigned long addr,
                              const struct jailhouse_memory *region)
{
        return addr >= region->phys_start &&
               addr < (region->phys_start + region->size);
}

static int unmap_from_root_cell(const struct jailhouse_memory *mem)
{
        /*
         * arch_unmap_memory_region uses the virtual address of the memory
         * region. As only the root cell has a guaranteed 1:1 mapping, make a
         * copy where we ensure this.
         */
        struct jailhouse_memory tmp = *mem;

        tmp.virt_start = tmp.phys_start;
        return arch_unmap_memory_region(&root_cell, &tmp);
}

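/*
 * Map those parts of a memory region back into the root cell that overlap
 * with one of the root cell's own memory regions. Depending on @mode, a
 * mapping error either aborts the loop or is only reported as a warning.
 */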
static int remap_to_root_cell(const struct jailhouse_memory *mem,
                              enum failure_mode mode)
{
        const struct jailhouse_memory *root_mem =
                jailhouse_cell_mem_regions(root_cell.config);
        struct jailhouse_memory overlap;
        unsigned int n;
        int err = 0;

        for (n = 0; n < root_cell.config->num_memory_regions;
             n++, root_mem++) {
                if (address_in_region(mem->phys_start, root_mem)) {
                        overlap.phys_start = mem->phys_start;
                        overlap.size = root_mem->size -
                                (overlap.phys_start - root_mem->phys_start);
                        if (overlap.size > mem->size)
                                overlap.size = mem->size;
                } else if (address_in_region(root_mem->phys_start, mem)) {
                        overlap.phys_start = root_mem->phys_start;
                        overlap.size = mem->size -
                                (overlap.phys_start - mem->phys_start);
                        if (overlap.size > root_mem->size)
                                overlap.size = root_mem->size;
                } else
                        continue;

                overlap.virt_start = root_mem->virt_start +
                        overlap.phys_start - root_mem->phys_start;
                overlap.flags = root_mem->flags;

                err = arch_map_memory_region(&root_cell, &overlap);
                if (err) {
                        if (mode == ABORT_ON_ERROR)
                                break;
                        printk("WARNING: Failed to re-assign memory region "
                               "to root cell\n");
                }
        }
        return err;
}

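/*
 * Tear down a cell: park its CPUs and hand them back to the root cell,
 * unmap all memory regions from the cell and return them to the root cell
 * (except the communication region), then let the architecture code clean
 * up and commit the new configuration.
 */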
static void cell_destroy_internal(struct per_cpu *cpu_data, struct cell *cell)
{
        const struct jailhouse_memory *mem =
                jailhouse_cell_mem_regions(cell->config);
        unsigned int cpu, n;

        for_each_cpu(cpu, cell->cpu_set) {
                arch_park_cpu(cpu);

                set_bit(cpu, root_cell.cpu_set->bitmap);
                per_cpu(cpu)->cell = &root_cell;
                per_cpu(cpu)->failed = false;
        }

        for (n = 0; n < cell->config->num_memory_regions; n++, mem++) {
                /*
                 * This cannot fail. The region was mapped as a whole before,
                 * thus no hugepages need to be broken up to unmap it.
                 */
                arch_unmap_memory_region(cell, mem);
                if (!(mem->flags & JAILHOUSE_MEM_COMM_REGION))
                        remap_to_root_cell(mem, WARN_ON_ERROR);
        }

        arch_cell_destroy(cpu_data, cell);

        arch_config_commit(cpu_data, cell);
}

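/*
 * Handle the JAILHOUSE_HC_CELL_CREATE hypercall: map and validate the cell
 * configuration passed by the root cell at @config_address, allocate and
 * initialize the new cell, move the assigned CPUs and memory regions from
 * the root cell to the new cell, and link it into the cell list. Returns
 * the new cell's ID or a negative error code.
 */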
static int cell_create(struct per_cpu *cpu_data, unsigned long config_address)
{
        unsigned long mapping_addr = TEMPORARY_MAPPING_CPU_BASE(cpu_data);
        unsigned long cfg_page_offs = config_address & ~PAGE_MASK;
        unsigned long cfg_header_size, cfg_total_size;
        const struct jailhouse_memory *mem;
        struct jailhouse_cell_desc *cfg;
        unsigned int cell_pages, cpu, n;
        struct cell *cell, *last;
        int err;

        /* We do not support creation over non-root cells. */
        if (cpu_data->cell != &root_cell)
                return -EPERM;

        cell_suspend(&root_cell, cpu_data);

        if (!cell_reconfig_ok(NULL)) {
                err = -EPERM;
                goto err_resume;
        }

        cfg_header_size = (config_address & ~PAGE_MASK) +
                sizeof(struct jailhouse_cell_desc);

        err = page_map_create(&hv_paging_structs, config_address & PAGE_MASK,
                              cfg_header_size, mapping_addr,
                              PAGE_READONLY_FLAGS, PAGE_MAP_NON_COHERENT);
        if (err)
                goto err_resume;

        cfg = (struct jailhouse_cell_desc *)(mapping_addr + cfg_page_offs);
        cfg_total_size = jailhouse_cell_config_size(cfg);
        if (cfg_total_size + cfg_page_offs > NUM_TEMPORARY_PAGES * PAGE_SIZE) {
                err = -E2BIG;
                goto err_resume;
        }

        for_each_cell(cell)
                if (strcmp(cell->config->name, cfg->name) == 0) {
                        err = -EEXIST;
                        goto err_resume;
                }

        err = page_map_create(&hv_paging_structs, config_address & PAGE_MASK,
                              cfg_total_size + cfg_page_offs, mapping_addr,
                              PAGE_READONLY_FLAGS, PAGE_MAP_NON_COHERENT);
        if (err)
                goto err_resume;

        err = check_mem_regions(cfg);
        if (err)
                goto err_resume;

        cell_pages = PAGE_ALIGN(sizeof(*cell) + cfg_total_size) / PAGE_SIZE;
        cell = page_alloc(&mem_pool, cell_pages);
        if (!cell) {
                err = -ENOMEM;
                goto err_resume;
        }

        cell->data_pages = cell_pages;
        cell->config = ((void *)cell) + sizeof(*cell);
        memcpy(cell->config, cfg, cfg_total_size);

        err = cell_init(cell, true);
        if (err)
                goto err_free_cell;

        /* don't assign the CPU we are currently running on */
        if (cpu_data->cpu_id <= cell->cpu_set->max_cpu_id &&
            test_bit(cpu_data->cpu_id, cell->cpu_set->bitmap)) {
                err = -EBUSY;
                goto err_free_cpu_set;
        }

        /* the root cell's cpu set must be a superset of the new cell's set */
        if (root_cell.cpu_set->max_cpu_id < cell->cpu_set->max_cpu_id) {
                err = -EBUSY;
                goto err_free_cpu_set;
        }
        for_each_cpu(cpu, cell->cpu_set)
                if (!test_bit(cpu, root_cell.cpu_set->bitmap)) {
                        err = -EBUSY;
                        goto err_free_cpu_set;
                }

        err = arch_cell_create(cpu_data, cell);
        if (err)
                goto err_free_cpu_set;

        for_each_cpu(cpu, cell->cpu_set) {
                arch_park_cpu(cpu);

                clear_bit(cpu, root_cell.cpu_set->bitmap);
                per_cpu(cpu)->cell = cell;
        }

        /*
         * Unmap the cell's memory regions from the root cell and map them to
         * the new cell instead.
         */
        mem = jailhouse_cell_mem_regions(cell->config);
        for (n = 0; n < cell->config->num_memory_regions; n++, mem++) {
                /*
                 * Unmap exceptions:
                 *  - the communication region is not backed by root memory
                 */
                if (!(mem->flags & JAILHOUSE_MEM_COMM_REGION)) {
                        err = unmap_from_root_cell(mem);
                        if (err)
                                goto err_destroy_cell;
                }

                err = arch_map_memory_region(cell, mem);
                if (err)
                        goto err_destroy_cell;
        }

        arch_config_commit(cpu_data, cell);

        cell->comm_page.comm_region.cell_state = JAILHOUSE_CELL_SHUT_DOWN;

        last = &root_cell;
        while (last->next)
                last = last->next;
        last->next = cell;
        num_cells++;

        cell_reconfig_completed();

        printk("Created cell \"%s\"\n", cell->config->name);

        page_map_dump_stats("after cell creation");

        cell_resume(cpu_data);

        return cell->id;

err_destroy_cell:
        cell_destroy_internal(cpu_data, cell);
err_free_cpu_set:
        destroy_cpu_set(cell);
err_free_cell:
        page_free(&mem_pool, cell, cell_pages);

err_resume:
        cell_resume(cpu_data);

        return err;
}

static bool cell_shutdown_ok(struct cell *cell)
{
        return cell_send_message(cell, JAILHOUSE_MSG_SHUTDOWN_REQUEST,
                                 MSG_REQUEST);
}

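/*
 * Common entry checks for cell_start, cell_set_loadable and cell_destroy:
 * only the root cell may manage cells, the target cell must exist and must
 * not be the root cell, a destroy must pass the reconfiguration check, and
 * the target cell has to approve the shutdown request. On success, both the
 * root cell and the target cell are suspended and *cell_ptr is set.
 */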
static int cell_management_prologue(enum management_task task,
                                    struct per_cpu *cpu_data, unsigned long id,
                                    struct cell **cell_ptr)
{
        /* We do not support management commands over non-root cells. */
        if (cpu_data->cell != &root_cell)
                return -EPERM;

        cell_suspend(&root_cell, cpu_data);

        for_each_cell(*cell_ptr)
                if ((*cell_ptr)->id == id)
                        break;

        if (!*cell_ptr) {
                cell_resume(cpu_data);
                return -ENOENT;
        }

        /* root cell cannot be managed */
        if (*cell_ptr == &root_cell) {
                cell_resume(cpu_data);
                return -EINVAL;
        }

        if ((task == CELL_DESTROY && !cell_reconfig_ok(*cell_ptr)) ||
            !cell_shutdown_ok(*cell_ptr)) {
                cell_resume(cpu_data);
                return -EPERM;
        }

        cell_suspend(*cell_ptr, cpu_data);

        return 0;
}

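/*
 * Handle the JAILHOUSE_HC_CELL_START hypercall: if the cell was marked
 * loadable, remove its loadable memory regions from the root cell again,
 * reset the Communication Region state and restart all CPUs of the cell.
 */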
static int cell_start(struct per_cpu *cpu_data, unsigned long id)
{
        const struct jailhouse_memory *mem;
        unsigned int cpu, n;
        struct cell *cell;
        int err;

        err = cell_management_prologue(CELL_START, cpu_data, id, &cell);
        if (err)
                return err;

        if (cell->loadable) {
                /* unmap all loadable memory regions from the root cell */
                mem = jailhouse_cell_mem_regions(cell->config);
                for (n = 0; n < cell->config->num_memory_regions; n++, mem++)
                        if (mem->flags & JAILHOUSE_MEM_LOADABLE) {
                                err = unmap_from_root_cell(mem);
                                if (err)
                                        goto out_resume;
                        }

                arch_config_commit(cpu_data, NULL);

                cell->loadable = false;
        }

        /* present a consistent Communication Region state to the cell */
        cell->comm_page.comm_region.cell_state = JAILHOUSE_CELL_RUNNING;
        cell->comm_page.comm_region.msg_to_cell = JAILHOUSE_MSG_NONE;

        for_each_cpu(cpu, cell->cpu_set) {
                per_cpu(cpu)->failed = false;
                arch_reset_cpu(cpu);
        }

        printk("Started cell \"%s\"\n", cell->config->name);

out_resume:
        cell_resume(cpu_data);

        return err;
}

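/*
 * Handle the JAILHOUSE_HC_CELL_SET_LOADABLE hypercall: park all CPUs of the
 * cell and map its loadable memory regions back into the root cell so that
 * the root cell can load a new cell image.
 */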
static int cell_set_loadable(struct per_cpu *cpu_data, unsigned long id)
{
        const struct jailhouse_memory *mem;
        unsigned int cpu, n;
        struct cell *cell;
        int err;

        err = cell_management_prologue(CELL_SET_LOADABLE, cpu_data, id, &cell);
        if (err)
                return err;

        for_each_cpu(cpu, cell->cpu_set) {
                per_cpu(cpu)->failed = false;
                arch_park_cpu(cpu);
        }

        if (cell->loadable)
                goto out_resume;

        cell->comm_page.comm_region.cell_state = JAILHOUSE_CELL_SHUT_DOWN;
        cell->loadable = true;

        /* map all loadable memory regions into the root cell */
        mem = jailhouse_cell_mem_regions(cell->config);
        for (n = 0; n < cell->config->num_memory_regions; n++, mem++)
                if (mem->flags & JAILHOUSE_MEM_LOADABLE) {
                        err = remap_to_root_cell(mem, ABORT_ON_ERROR);
                        if (err)
                                goto out_resume;
                }

        arch_config_commit(cpu_data, NULL);

        printk("Cell \"%s\" can be loaded\n", cell->config->name);

out_resume:
        cell_resume(cpu_data);

        return err;
}

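/*
 * Handle the JAILHOUSE_HC_CELL_DESTROY hypercall: tear the cell down via
 * cell_destroy_internal, unlink it from the cell list and free its memory.
 */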
static int cell_destroy(struct per_cpu *cpu_data, unsigned long id)
{
        struct cell *cell, *previous;
        int err;

        err = cell_management_prologue(CELL_DESTROY, cpu_data, id, &cell);
        if (err)
                return err;

        printk("Closing cell \"%s\"\n", cell->config->name);

        cell_destroy_internal(cpu_data, cell);

        previous = &root_cell;
        while (previous->next != cell)
                previous = previous->next;
        previous->next = cell->next;
        num_cells--;

        page_free(&mem_pool, cell, cell->data_pages);
        page_map_dump_stats("after cell destruction");

        cell_reconfig_completed();

        cell_resume(cpu_data);

        return 0;
}

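/*
 * Handle the JAILHOUSE_HC_CELL_GET_STATE hypercall: report the state of the
 * requested cell as published in its Communication Region.
 */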
static int cell_get_state(struct per_cpu *cpu_data, unsigned long id)
{
        struct cell *cell;

        if (cpu_data->cell != &root_cell)
                return -EPERM;

        /*
         * We do not need explicit synchronization with cell_create/destroy
         * because their cell_suspend(root_cell) will not return before we
         * have left this hypercall.
         */
        for_each_cell(cell)
                if (cell->id == id) {
                        u32 state = cell->comm_page.comm_region.cell_state;

                        switch (state) {
                        case JAILHOUSE_CELL_RUNNING:
                        case JAILHOUSE_CELL_RUNNING_LOCKED:
                        case JAILHOUSE_CELL_SHUT_DOWN:
                        case JAILHOUSE_CELL_FAILED:
                                return state;
                        default:
                                return -EINVAL;
                        }
                }
        return -ENOENT;
}

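/*
 * Handle the JAILHOUSE_HC_DISABLE hypercall: once every non-root cell has
 * approved the shutdown request, close all cells and disable the hypervisor.
 * Each root-cell CPU enters this hypercall; the first one performs the
 * actual shutdown, the remaining ones only pick up the recorded result.
 */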
static int shutdown(struct per_cpu *cpu_data)
{
        unsigned int this_cpu = cpu_data->cpu_id;
        struct cell *cell;
        unsigned int cpu;
        int state, ret;

        /* We do not support shutdown over non-root cells. */
        if (cpu_data->cell != &root_cell)
                return -EPERM;

        spin_lock(&shutdown_lock);

        if (cpu_data->shutdown_state == SHUTDOWN_NONE) {
                state = SHUTDOWN_STARTED;
                for_each_non_root_cell(cell)
                        if (!cell_shutdown_ok(cell))
                                state = -EPERM;

                if (state == SHUTDOWN_STARTED) {
                        printk("Shutting down hypervisor\n");

                        for_each_non_root_cell(cell) {
                                cell_suspend(cell, cpu_data);

                                printk("Closing cell \"%s\"\n",
                                       cell->config->name);

                                for_each_cpu(cpu, cell->cpu_set) {
                                        printk(" Releasing CPU %d\n", cpu);
                                        arch_shutdown_cpu(cpu);
                                }
                        }

                        printk("Closing root cell \"%s\"\n",
                               root_cell.config->name);
                        arch_shutdown();
                }

                for_each_cpu(cpu, root_cell.cpu_set)
                        per_cpu(cpu)->shutdown_state = state;
        }

        if (cpu_data->shutdown_state == SHUTDOWN_STARTED) {
                printk(" Releasing CPU %d\n", this_cpu);
                ret = 0;
        } else
                ret = cpu_data->shutdown_state;
        cpu_data->shutdown_state = SHUTDOWN_NONE;

        spin_unlock(&shutdown_lock);

        return ret;
}

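/*
 * Handle the JAILHOUSE_HC_HYPERVISOR_GET_INFO hypercall: return statistics
 * about the hypervisor's memory pools or the current number of cells.
 */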
static long hypervisor_get_info(struct per_cpu *cpu_data, unsigned long type)
{
        switch (type) {
        case JAILHOUSE_INFO_MEM_POOL_SIZE:
                return mem_pool.pages;
        case JAILHOUSE_INFO_MEM_POOL_USED:
                return mem_pool.used_pages;
        case JAILHOUSE_INFO_REMAP_POOL_SIZE:
                return remap_pool.pages;
        case JAILHOUSE_INFO_REMAP_POOL_USED:
                return remap_pool.used_pages;
        case JAILHOUSE_INFO_NUM_CELLS:
                return num_cells;
        default:
                return -EINVAL;
        }
}

static int cpu_get_state(struct per_cpu *cpu_data, unsigned long cpu_id)
{
        if (!cpu_id_valid(cpu_id))
                return -EINVAL;

        /*
         * We do not need explicit synchronization with cell_destroy because
         * its cell_suspend(root_cell + this_cell) will not return before we
         * have left this hypercall.
         */
        if (cpu_data->cell != &root_cell &&
            (cpu_id > cpu_data->cell->cpu_set->max_cpu_id ||
             !test_bit(cpu_id, cpu_data->cell->cpu_set->bitmap)))
                return -EPERM;

        return per_cpu(cpu_id)->failed ? JAILHOUSE_CPU_FAILED :
                JAILHOUSE_CPU_RUNNING;
}

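/*
 * Central hypercall dispatcher: invoked from the architecture-specific entry
 * code with the hypercall code and up to two arguments, and routes the call
 * to the matching handler above.
 */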
long hypercall(struct per_cpu *cpu_data, unsigned long code,
               unsigned long arg1, unsigned long arg2)
{
        switch (code) {
        case JAILHOUSE_HC_DISABLE:
                return shutdown(cpu_data);
        case JAILHOUSE_HC_CELL_CREATE:
                return cell_create(cpu_data, arg1);
        case JAILHOUSE_HC_CELL_START:
                return cell_start(cpu_data, arg1);
        case JAILHOUSE_HC_CELL_SET_LOADABLE:
                return cell_set_loadable(cpu_data, arg1);
        case JAILHOUSE_HC_CELL_DESTROY:
                return cell_destroy(cpu_data, arg1);
        case JAILHOUSE_HC_HYPERVISOR_GET_INFO:
                return hypervisor_get_info(cpu_data, arg1);
        case JAILHOUSE_HC_CELL_GET_STATE:
                return cell_get_state(cpu_data, arg1);
        case JAILHOUSE_HC_CPU_GET_STATE:
                return cpu_get_state(cpu_data, arg1);
        default:
                return -ENOSYS;
        }
}

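/*
 * Stop the current CPU after a fatal error: mark it as stopped, clear the
 * panic-in-progress marker if this CPU raised the panic, and hand control
 * to arch_panic_stop.
 */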
void panic_stop(struct per_cpu *cpu_data)
{
        panic_printk("Stopping CPU");
        if (cpu_data) {
                panic_printk(" %d", cpu_data->cpu_id);
                cpu_data->cpu_stopped = true;
        }
        panic_printk("\n");

        if (phys_processor_id() == panic_cpu)
                panic_in_progress = 0;

        arch_panic_stop(cpu_data);
}

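/*
 * Park the current CPU after a fatal error inside a cell: mark the CPU as
 * failed and, once all CPUs of the cell have failed, set the cell state to
 * JAILHOUSE_CELL_FAILED before handing control to arch_panic_halt.
 */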
void panic_halt(struct per_cpu *cpu_data)
{
        struct cell *cell = cpu_data->cell;
        bool cell_failed = true;
        unsigned int cpu;

        panic_printk("Parking CPU %d\n", cpu_data->cpu_id);

        cpu_data->failed = true;
        for_each_cpu(cpu, cell->cpu_set)
                if (!per_cpu(cpu)->failed) {
                        cell_failed = false;
                        break;
                }
        if (cell_failed)
                cell->comm_page.comm_region.cell_state = JAILHOUSE_CELL_FAILED;

        arch_panic_halt(cpu_data);

        if (phys_processor_id() == panic_cpu)
                panic_in_progress = 0;
}