/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013-2015
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <jailhouse/entry.h>
#include <jailhouse/control.h>
#include <jailhouse/mmio.h>
#include <jailhouse/printk.h>
#include <jailhouse/paging.h>
#include <jailhouse/processor.h>
#include <jailhouse/string.h>
#include <jailhouse/utils.h>
#include <asm/bitops.h>
#include <asm/spinlock.h>

enum msg_type {MSG_REQUEST, MSG_INFORMATION};
enum failure_mode {ABORT_ON_ERROR, WARN_ON_ERROR};
enum management_task {CELL_START, CELL_SET_LOADABLE, CELL_DESTROY};

/** System configuration as used while activating the hypervisor. */
struct jailhouse_system *system_config;
/** State structure of the root cell. @ingroup Control */
struct cell root_cell;

static DEFINE_SPINLOCK(shutdown_lock);
static unsigned int num_cells = 1;

/**
 * CPU set iterator.
 * @param cpu           Previous CPU ID.
 * @param cpu_set       CPU set to iterate over.
 * @param exception     CPU ID to skip if it is contained.
 *
 * @return Next CPU ID in the set.
 *
 * @note For internal use only. Use for_each_cpu() or for_each_cpu_except()
 * instead.
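 *
 * Typical use goes through the wrapper macros; a minimal sketch:
 * @code
 *      unsigned int cpu;
 *
 *      for_each_cpu(cpu, cell->cpu_set)
 *              printk("CPU %d is assigned to the cell\n", cpu);
 * @endcode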
 */
unsigned int next_cpu(unsigned int cpu, struct cpu_set *cpu_set, int exception)
{
        do
                cpu++;
        while (cpu <= cpu_set->max_cpu_id &&
               (cpu == exception || !test_bit(cpu, cpu_set->bitmap)));
        return cpu;
}

/**
 * Check if a CPU ID is contained in the system's CPU set, i.e. the initial CPU
 * set of the root cell.
 * @param cpu_id        CPU ID to check.
 *
 * @return True if CPU ID is valid.
 */
bool cpu_id_valid(unsigned long cpu_id)
{
        const unsigned long *system_cpu_set =
                jailhouse_cell_cpu_set(&system_config->root_cell);

        return (cpu_id < system_config->root_cell.cpu_set_size * 8 &&
                test_bit(cpu_id, system_cpu_set));
}

static void cell_suspend(struct cell *cell, struct per_cpu *cpu_data)
{
        unsigned int cpu;

        for_each_cpu_except(cpu, cell->cpu_set, cpu_data->cpu_id)
                arch_suspend_cpu(cpu);
}

static void cell_resume(struct per_cpu *cpu_data)
{
        unsigned int cpu;

        for_each_cpu_except(cpu, cpu_data->cell->cpu_set, cpu_data->cpu_id)
                arch_resume_cpu(cpu);
}

/**
 * Deliver a message to a cell and wait for the reply.
 * @param cell          Target cell.
 * @param message       Message code to be sent (JAILHOUSE_MSG_*).
 * @param type          Message type, defines the valid replies.
 *
 * @return True if a request message was approved or reception of an
 *         informational message was acknowledged by the target cell. It also
 *         returns true if the target cell does not support an active
 *         communication region, is shut down, or is in a failed state.
 *         Returns false on request denial or invalid replies.
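 *
 * A cooperating cell answers by writing the reply into its communication
 * region. A sketch of the cell-side pattern, using the field names from
 * struct jailhouse_comm_region as used below (JAILHOUSE_MSG_REQUEST_DENIED
 * is the assumed counterpart to approval):
 * @code
 *      switch (comm_region->msg_to_cell) {
 *      case JAILHOUSE_MSG_SHUTDOWN_REQUEST:
 *              comm_region->reply_from_cell = shutdown_ok ?
 *                      JAILHOUSE_MSG_REQUEST_APPROVED :
 *                      JAILHOUSE_MSG_REQUEST_DENIED;
 *              break;
 *      }
 * @endcode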
 */
static bool cell_send_message(struct cell *cell, u32 message,
                              enum msg_type type)
{
        if (cell->config->flags & JAILHOUSE_CELL_PASSIVE_COMMREG)
                return true;

        jailhouse_send_msg_to_cell(&cell->comm_page.comm_region, message);

        while (1) {
                u32 reply = cell->comm_page.comm_region.reply_from_cell;
                u32 cell_state = cell->comm_page.comm_region.cell_state;

                if (cell_state == JAILHOUSE_CELL_SHUT_DOWN ||
                    cell_state == JAILHOUSE_CELL_FAILED)
                        return true;

                if ((type == MSG_REQUEST &&
                     reply == JAILHOUSE_MSG_REQUEST_APPROVED) ||
                    (type == MSG_INFORMATION &&
                     reply == JAILHOUSE_MSG_RECEIVED))
                        return true;

                if (reply != JAILHOUSE_MSG_NONE)
                        return false;

                cpu_relax();
        }
}

static bool cell_reconfig_ok(struct cell *excluded_cell)
{
        struct cell *cell;

        for_each_non_root_cell(cell)
                if (cell != excluded_cell &&
                    cell->comm_page.comm_region.cell_state ==
                                JAILHOUSE_CELL_RUNNING_LOCKED)
                        return false;
        return true;
}

static void cell_reconfig_completed(void)
{
        struct cell *cell;

        for_each_non_root_cell(cell)
                cell_send_message(cell, JAILHOUSE_MSG_RECONFIG_COMPLETED,
                                  MSG_INFORMATION);
}

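/*
 * Return the lowest cell ID not currently in use. With cells holding IDs 0,
 * 1 and 3, for example, the retry loop settles on 2, so gaps left by
 * destroyed cells are reused.
 */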
static unsigned int get_free_cell_id(void)
{
        unsigned int id = 0;
        struct cell *cell;

retry:
        for_each_cell(cell)
                if (cell->id == id) {
                        id++;
                        goto retry;
                }

        return id;
}

/**
 * Initialize a new cell.
 * @param cell  Cell to be initialized.
 *
 * @return 0 on success, negative error code otherwise.
 *
 * @note The cell data structure must be zero-initialized.
 */
int cell_init(struct cell *cell)
{
        const unsigned long *config_cpu_set =
                jailhouse_cell_cpu_set(cell->config);
        unsigned long cpu_set_size = cell->config->cpu_set_size;
        struct cpu_set *cpu_set;
        int err;

        cell->id = get_free_cell_id();

        if (cpu_set_size > PAGE_SIZE)
                return trace_error(-EINVAL);
        if (cpu_set_size > sizeof(cell->small_cpu_set.bitmap)) {
                cpu_set = page_alloc(&mem_pool, 1);
                if (!cpu_set)
                        return -ENOMEM;
        } else {
                cpu_set = &cell->small_cpu_set;
        }
        cpu_set->max_cpu_id = cpu_set_size * 8 - 1;
        memcpy(cpu_set->bitmap, config_cpu_set, cpu_set_size);

        cell->cpu_set = cpu_set;

        err = mmio_cell_init(cell);
        if (err && cell->cpu_set != &cell->small_cpu_set)
                page_free(&mem_pool, cell->cpu_set, 1);

        return err;
}

static void cell_exit(struct cell *cell)
{
        mmio_cell_exit(cell);

        if (cell->cpu_set != &cell->small_cpu_set)
                page_free(&mem_pool, cell->cpu_set, 1);
}

/**
 * Perform basic validation of cell memory regions.
 * @param config        Cell configuration description.
 *
 * @return 0 if the regions are valid, @c -EINVAL if the validation failed.
 *
 * Checks performed on the memory regions are:
 * \li Page alignment of physical and virtual address and the size.
 * \li Use of supported flags only.
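 *
 * For illustration, a region that passes these checks might look like this
 * (the access flag names are assumptions taken from typical cell
 * configurations, not from this file):
 * @code
 *      {
 *              .phys_start = 0x3f000000,
 *              .virt_start = 0x0,
 *              .size = 0x100000,
 *              .flags = JAILHOUSE_MEM_READ | JAILHOUSE_MEM_WRITE,
 *      }
 * @endcode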
 */
int check_mem_regions(const struct jailhouse_cell_desc *config)
{
        const struct jailhouse_memory *mem =
                jailhouse_cell_mem_regions(config);
        unsigned int n;

        for (n = 0; n < config->num_memory_regions; n++, mem++) {
                if (mem->phys_start & ~PAGE_MASK ||
                    mem->virt_start & ~PAGE_MASK ||
                    mem->size & ~PAGE_MASK ||
                    mem->flags & ~JAILHOUSE_MEM_VALID_FLAGS)
                        return trace_error(-EINVAL);
        }
        return 0;
}

/**
 * Apply system configuration changes.
 * @param cell_added_removed    Cell that was added to or removed from the
 *                              system, or NULL.
 *
 * @see arch_config_commit
 * @see pci_config_commit
 */
void config_commit(struct cell *cell_added_removed)
{
        arch_flush_cell_vcpu_caches(&root_cell);
        if (cell_added_removed && cell_added_removed != &root_cell)
                arch_flush_cell_vcpu_caches(cell_added_removed);

        arch_config_commit(cell_added_removed);
}

static bool address_in_region(unsigned long addr,
                              const struct jailhouse_memory *region)
{
        return addr >= region->phys_start &&
               addr < (region->phys_start + region->size);
}

static int unmap_from_root_cell(const struct jailhouse_memory *mem)
{
        /*
         * arch_unmap_memory_region uses the virtual address of the memory
         * region. As only the root cell has a guaranteed 1:1 mapping, make a
         * copy where we ensure this.
         */
        struct jailhouse_memory tmp = *mem;

        tmp.virt_start = tmp.phys_start;
        return arch_unmap_memory_region(&root_cell, &tmp);
}

static int remap_to_root_cell(const struct jailhouse_memory *mem,
                              enum failure_mode mode)
{
        const struct jailhouse_memory *root_mem =
                jailhouse_cell_mem_regions(root_cell.config);
        struct jailhouse_memory overlap;
        unsigned int n;
        int err = 0;

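        /*
         * The loop below intersects mem with every root-cell region it
         * overlaps. Worked example: root region [0x100000, 0x500000), mem
         * [0x400000, 0x800000): the first branch matches, overlap.phys_start
         * becomes 0x400000 and overlap.size = 0x400000 - 0x300000 = 0x100000,
         * i.e. exactly the common range [0x400000, 0x500000), which is then
         * mapped back using the root region's flags and virtual offset.
         */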
        for (n = 0; n < root_cell.config->num_memory_regions;
             n++, root_mem++) {
                if (address_in_region(mem->phys_start, root_mem)) {
                        overlap.phys_start = mem->phys_start;
                        overlap.size = root_mem->size -
                                (overlap.phys_start - root_mem->phys_start);
                        if (overlap.size > mem->size)
                                overlap.size = mem->size;
                } else if (address_in_region(root_mem->phys_start, mem)) {
                        overlap.phys_start = root_mem->phys_start;
                        overlap.size = mem->size -
                                (overlap.phys_start - mem->phys_start);
                        if (overlap.size > root_mem->size)
                                overlap.size = root_mem->size;
                } else
                        continue;

                overlap.virt_start = root_mem->virt_start +
                        overlap.phys_start - root_mem->phys_start;
                overlap.flags = root_mem->flags;

                err = arch_map_memory_region(&root_cell, &overlap);
                if (err) {
                        if (mode == ABORT_ON_ERROR)
                                break;
                        printk("WARNING: Failed to re-assign memory region "
                               "to root cell\n");
                }
        }
        return err;
}

static void cell_destroy_internal(struct per_cpu *cpu_data, struct cell *cell)
{
        const struct jailhouse_memory *mem =
                jailhouse_cell_mem_regions(cell->config);
        unsigned int cpu, n;

        for_each_cpu(cpu, cell->cpu_set) {
                arch_park_cpu(cpu);

                set_bit(cpu, root_cell.cpu_set->bitmap);
                per_cpu(cpu)->cell = &root_cell;
                per_cpu(cpu)->failed = false;
                memset(per_cpu(cpu)->stats, 0, sizeof(per_cpu(cpu)->stats));
        }

        for (n = 0; n < cell->config->num_memory_regions; n++, mem++) {
                /*
                 * This cannot fail. The region was mapped as a whole before,
                 * thus no hugepages need to be broken up to unmap it.
                 */
                arch_unmap_memory_region(cell, mem);
                if (!(mem->flags & (JAILHOUSE_MEM_COMM_REGION |
                                    JAILHOUSE_MEM_ROOTSHARED)))
                        remap_to_root_cell(mem, WARN_ON_ERROR);
        }

        arch_cell_destroy(cell);

        config_commit(cell);

        cell_exit(cell);
}

static int cell_create(struct per_cpu *cpu_data, unsigned long config_address)
{
        unsigned long cfg_page_offs = config_address & ~PAGE_MASK;
        unsigned int cfg_pages, cell_pages, cpu, n;
        const struct jailhouse_memory *mem;
        struct jailhouse_cell_desc *cfg;
        unsigned long cfg_total_size;
        struct cell *cell, *last;
        void *cfg_mapping;
        int err;

        /* We do not support creation requests from non-root cells. */
        if (cpu_data->cell != &root_cell)
                return -EPERM;

        cell_suspend(&root_cell, cpu_data);

        if (!cell_reconfig_ok(NULL)) {
                err = -EPERM;
                goto err_resume;
        }

        cfg_pages = PAGES(cfg_page_offs + sizeof(struct jailhouse_cell_desc));
        cfg_mapping = paging_get_guest_pages(NULL, config_address, cfg_pages,
                                             PAGE_READONLY_FLAGS);
        if (!cfg_mapping) {
                err = -ENOMEM;
                goto err_resume;
        }

        cfg = (struct jailhouse_cell_desc *)(cfg_mapping + cfg_page_offs);

        for_each_cell(cell)
                /*
                 * strcmp is safe here without extra bounds checking because
                 * sizeof(cell->config->name) == sizeof(cfg->name) and
                 * cell->config->name is guaranteed to be null-terminated.
                 */
                if (strcmp(cell->config->name, cfg->name) == 0) {
                        err = -EEXIST;
                        goto err_resume;
                }

        cfg_total_size = jailhouse_cell_config_size(cfg);
        cfg_pages = PAGES(cfg_page_offs + cfg_total_size);
        if (cfg_pages > NUM_TEMPORARY_PAGES) {
                err = trace_error(-E2BIG);
                goto err_resume;
        }

        if (!paging_get_guest_pages(NULL, config_address, cfg_pages,
                                    PAGE_READONLY_FLAGS)) {
                err = -ENOMEM;
                goto err_resume;
        }

        err = check_mem_regions(cfg);
        if (err)
                goto err_resume;

        cell_pages = PAGES(sizeof(*cell) + cfg_total_size);
        cell = page_alloc(&mem_pool, cell_pages);
        if (!cell) {
                err = -ENOMEM;
                goto err_resume;
        }

        cell->data_pages = cell_pages;
        cell->config = ((void *)cell) + sizeof(*cell);
        memcpy(cell->config, cfg, cfg_total_size);

        err = cell_init(cell);
        if (err)
                goto err_free_cell;

        /* don't assign the CPU we are currently running on */
        if (cell_owns_cpu(cell, cpu_data->cpu_id)) {
                err = trace_error(-EBUSY);
                goto err_cell_exit;
        }

        /* the root cell's CPU set must be a superset of the new cell's set */
        for_each_cpu(cpu, cell->cpu_set)
                if (!cell_owns_cpu(&root_cell, cpu)) {
                        err = trace_error(-EBUSY);
                        goto err_cell_exit;
                }

        err = arch_cell_create(cell);
        if (err)
                goto err_cell_exit;

        for_each_cpu(cpu, cell->cpu_set) {
                arch_park_cpu(cpu);

                clear_bit(cpu, root_cell.cpu_set->bitmap);
                per_cpu(cpu)->cell = cell;
                memset(per_cpu(cpu)->stats, 0, sizeof(per_cpu(cpu)->stats));
        }

        /*
         * Unmap the cell's memory regions from the root cell and map them to
         * the new cell instead.
         */
        mem = jailhouse_cell_mem_regions(cell->config);
        for (n = 0; n < cell->config->num_memory_regions; n++, mem++) {
                /*
                 * Unmap exceptions:
                 *  - the communication region is not backed by root memory
                 *  - regions that may be shared with the root cell
                 */
                if (!(mem->flags & (JAILHOUSE_MEM_COMM_REGION |
                                    JAILHOUSE_MEM_ROOTSHARED))) {
                        err = unmap_from_root_cell(mem);
                        if (err)
                                goto err_destroy_cell;
                }

                err = arch_map_memory_region(cell, mem);
                if (err)
                        goto err_destroy_cell;
        }

        config_commit(cell);

        cell->comm_page.comm_region.cell_state = JAILHOUSE_CELL_SHUT_DOWN;

        last = &root_cell;
        while (last->next)
                last = last->next;
        last->next = cell;
        num_cells++;

        cell_reconfig_completed();

        printk("Created cell \"%s\"\n", cell->config->name);

        paging_dump_stats("after cell creation");

        cell_resume(cpu_data);

        return cell->id;

err_destroy_cell:
        cell_destroy_internal(cpu_data, cell);
        /* cell_destroy_internal already calls cell_exit */
        goto err_free_cell;
err_cell_exit:
        cell_exit(cell);
err_free_cell:
        page_free(&mem_pool, cell, cell_pages);
err_resume:
        cell_resume(cpu_data);

        return err;
}

static bool cell_shutdown_ok(struct cell *cell)
{
        return cell_send_message(cell, JAILHOUSE_MSG_SHUTDOWN_REQUEST,
                                 MSG_REQUEST);
}

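/*
 * Common entry path of cell_start, cell_set_loadable and cell_destroy:
 * suspend the root cell, look up the target cell by ID, refuse to manage the
 * root cell itself, ask the target for shutdown approval and, on success,
 * suspend it as well. On any error the root cell is resumed again.
 */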
static int cell_management_prologue(enum management_task task,
                                    struct per_cpu *cpu_data, unsigned long id,
                                    struct cell **cell_ptr)
{
        /* We do not support management commands from non-root cells. */
        if (cpu_data->cell != &root_cell)
                return -EPERM;

        cell_suspend(&root_cell, cpu_data);

        for_each_cell(*cell_ptr)
                if ((*cell_ptr)->id == id)
                        break;

        if (!*cell_ptr) {
                cell_resume(cpu_data);
                return -ENOENT;
        }

        /* root cell cannot be managed */
        if (*cell_ptr == &root_cell) {
                cell_resume(cpu_data);
                return -EINVAL;
        }

        if ((task == CELL_DESTROY && !cell_reconfig_ok(*cell_ptr)) ||
            !cell_shutdown_ok(*cell_ptr)) {
                cell_resume(cpu_data);
                return -EPERM;
        }

        cell_suspend(*cell_ptr, cpu_data);

        return 0;
}

static int cell_start(struct per_cpu *cpu_data, unsigned long id)
{
        const struct jailhouse_memory *mem;
        unsigned int cpu, n;
        struct cell *cell;
        int err;

        err = cell_management_prologue(CELL_START, cpu_data, id, &cell);
        if (err)
                return err;

        if (cell->loadable) {
                /* unmap all loadable memory regions from the root cell */
                mem = jailhouse_cell_mem_regions(cell->config);
                for (n = 0; n < cell->config->num_memory_regions; n++, mem++)
                        if (mem->flags & JAILHOUSE_MEM_LOADABLE) {
                                err = unmap_from_root_cell(mem);
                                if (err)
                                        goto out_resume;
                        }

                config_commit(NULL);

                cell->loadable = false;
        }

        /* present a consistent Communication Region state to the cell */
        cell->comm_page.comm_region.cell_state = JAILHOUSE_CELL_RUNNING;
        cell->comm_page.comm_region.msg_to_cell = JAILHOUSE_MSG_NONE;

        for_each_cpu(cpu, cell->cpu_set) {
                per_cpu(cpu)->failed = false;
                arch_reset_cpu(cpu);
        }

        printk("Started cell \"%s\"\n", cell->config->name);

out_resume:
        cell_resume(cpu_data);

        return err;
}

static int cell_set_loadable(struct per_cpu *cpu_data, unsigned long id)
{
        const struct jailhouse_memory *mem;
        unsigned int cpu, n;
        struct cell *cell;
        int err;

        err = cell_management_prologue(CELL_SET_LOADABLE, cpu_data, id, &cell);
        if (err)
                return err;

        for_each_cpu(cpu, cell->cpu_set) {
                per_cpu(cpu)->failed = false;
                arch_park_cpu(cpu);
        }

        if (cell->loadable)
                goto out_resume;

        cell->comm_page.comm_region.cell_state = JAILHOUSE_CELL_SHUT_DOWN;
        cell->loadable = true;

        /* map all loadable memory regions into the root cell */
        mem = jailhouse_cell_mem_regions(cell->config);
        for (n = 0; n < cell->config->num_memory_regions; n++, mem++)
                if (mem->flags & JAILHOUSE_MEM_LOADABLE) {
                        err = remap_to_root_cell(mem, ABORT_ON_ERROR);
                        if (err)
                                goto out_resume;
                }

        config_commit(NULL);

        printk("Cell \"%s\" can be loaded\n", cell->config->name);

out_resume:
        cell_resume(cpu_data);

        return err;
}

static int cell_destroy(struct per_cpu *cpu_data, unsigned long id)
{
        struct cell *cell, *previous;
        int err;

        err = cell_management_prologue(CELL_DESTROY, cpu_data, id, &cell);
        if (err)
                return err;

        printk("Closing cell \"%s\"\n", cell->config->name);

        cell_destroy_internal(cpu_data, cell);

        previous = &root_cell;
        while (previous->next != cell)
                previous = previous->next;
        previous->next = cell->next;
        num_cells--;

        page_free(&mem_pool, cell, cell->data_pages);
        paging_dump_stats("after cell destruction");

        cell_reconfig_completed();

        cell_resume(cpu_data);

        return 0;
}

static int cell_get_state(struct per_cpu *cpu_data, unsigned long id)
{
        struct cell *cell;

        if (cpu_data->cell != &root_cell)
                return -EPERM;

        /*
         * We do not need explicit synchronization with cell_create/destroy
         * because their cell_suspend(root_cell) will not return before we
         * have left this hypercall.
         */
        for_each_cell(cell)
                if (cell->id == id) {
                        u32 state = cell->comm_page.comm_region.cell_state;

                        switch (state) {
                        case JAILHOUSE_CELL_RUNNING:
                        case JAILHOUSE_CELL_RUNNING_LOCKED:
                        case JAILHOUSE_CELL_SHUT_DOWN:
                        case JAILHOUSE_CELL_FAILED:
                                return state;
                        default:
                                return -EINVAL;
                        }
                }
        return -ENOENT;
}

static int shutdown(struct per_cpu *cpu_data)
{
        unsigned int this_cpu = cpu_data->cpu_id;
        struct cell *cell;
        unsigned int cpu;
        int state, ret;

        /* We do not support shutdown requests from non-root cells. */
        if (cpu_data->cell != &root_cell)
                return -EPERM;

        spin_lock(&shutdown_lock);

        if (cpu_data->shutdown_state == SHUTDOWN_NONE) {
                state = SHUTDOWN_STARTED;
                for_each_non_root_cell(cell)
                        if (!cell_shutdown_ok(cell))
                                state = -EPERM;

                if (state == SHUTDOWN_STARTED) {
                        printk("Shutting down hypervisor\n");

                        for_each_non_root_cell(cell) {
                                cell_suspend(cell, cpu_data);

                                printk("Closing cell \"%s\"\n",
                                       cell->config->name);

                                for_each_cpu(cpu, cell->cpu_set) {
                                        printk(" Releasing CPU %d\n", cpu);
                                        arch_shutdown_cpu(cpu);
                                }
                        }

                        printk("Closing root cell \"%s\"\n",
                               root_cell.config->name);
                        arch_shutdown();
                }

                for_each_cpu(cpu, root_cell.cpu_set)
                        per_cpu(cpu)->shutdown_state = state;
        }

        if (cpu_data->shutdown_state == SHUTDOWN_STARTED) {
                printk(" Releasing CPU %d\n", this_cpu);
                ret = 0;
        } else
                ret = cpu_data->shutdown_state;
        cpu_data->shutdown_state = SHUTDOWN_NONE;

        spin_unlock(&shutdown_lock);

        return ret;
}

static long hypervisor_get_info(struct per_cpu *cpu_data, unsigned long type)
{
        switch (type) {
        case JAILHOUSE_INFO_MEM_POOL_SIZE:
                return mem_pool.pages;
        case JAILHOUSE_INFO_MEM_POOL_USED:
                return mem_pool.used_pages;
        case JAILHOUSE_INFO_REMAP_POOL_SIZE:
                return remap_pool.pages;
        case JAILHOUSE_INFO_REMAP_POOL_USED:
                return remap_pool.used_pages;
        case JAILHOUSE_INFO_NUM_CELLS:
                return num_cells;
        default:
                return -EINVAL;
        }
}

static int cpu_get_info(struct per_cpu *cpu_data, unsigned long cpu_id,
                        unsigned long type)
{
        if (!cpu_id_valid(cpu_id))
                return -EINVAL;

        /*
         * We do not need explicit synchronization with cell_destroy because
         * its cell_suspend(root_cell + this_cell) will not return before we
         * have left this hypercall.
         */
        if (cpu_data->cell != &root_cell &&
            !cell_owns_cpu(cpu_data->cell, cpu_id))
                return -EPERM;

        if (type == JAILHOUSE_CPU_INFO_STATE) {
                return per_cpu(cpu_id)->failed ? JAILHOUSE_CPU_FAILED :
                        JAILHOUSE_CPU_RUNNING;
        } else if (type >= JAILHOUSE_CPU_INFO_STAT_BASE &&
                type - JAILHOUSE_CPU_INFO_STAT_BASE < JAILHOUSE_NUM_CPU_STATS) {
                type -= JAILHOUSE_CPU_INFO_STAT_BASE;
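                /*
                 * Mask out bit 31 so the returned counter stays
                 * non-negative; negative values are reserved for errors.
                 */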
                return per_cpu(cpu_id)->stats[type] & BIT_MASK(30, 0);
        } else
                return -EINVAL;
}

/**
 * Handle hypercall invoked by a cell.
 * @param code          Hypercall code.
 * @param arg1          First hypercall argument.
 * @param arg2          Second hypercall argument.
 *
 * @return Value that shall be passed to the caller of the hypercall on return.
 *
 * @note Whether @c arg1 and @c arg2 are valid depends on the hypercall code.
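 *
 * A sketch of how a cell might issue a hypercall, assuming the x86 register
 * convention (code in RAX, arguments in RDI and RSI, vmcall trapping into
 * the hypervisor); the helper name is hypothetical:
 * @code
 *      static inline long call_hypervisor(unsigned long code,
 *                                         unsigned long arg1,
 *                                         unsigned long arg2)
 *      {
 *              long result;
 *
 *              asm volatile("vmcall"
 *                      : "=a" (result)
 *                      : "a" (code), "D" (arg1), "S" (arg2)
 *                      : "memory");
 *              return result;
 *      }
 * @endcode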
 */
long hypercall(unsigned long code, unsigned long arg1, unsigned long arg2)
{
        struct per_cpu *cpu_data = this_cpu_data();

        cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_HYPERCALL]++;

        switch (code) {
        case JAILHOUSE_HC_DISABLE:
                return shutdown(cpu_data);
        case JAILHOUSE_HC_CELL_CREATE:
                return cell_create(cpu_data, arg1);
        case JAILHOUSE_HC_CELL_START:
                return cell_start(cpu_data, arg1);
        case JAILHOUSE_HC_CELL_SET_LOADABLE:
                return cell_set_loadable(cpu_data, arg1);
        case JAILHOUSE_HC_CELL_DESTROY:
                return cell_destroy(cpu_data, arg1);
        case JAILHOUSE_HC_HYPERVISOR_GET_INFO:
                return hypervisor_get_info(cpu_data, arg1);
        case JAILHOUSE_HC_CELL_GET_STATE:
                return cell_get_state(cpu_data, arg1);
        case JAILHOUSE_HC_CPU_GET_INFO:
                return cpu_get_info(cpu_data, arg1, arg2);
        default:
                return -ENOSYS;
        }
}

/**
 * Stops the current CPU on panic and prevents any execution on it until the
 * system is rebooted.
 *
 * @note This service should be used when facing an unrecoverable error of the
 * hypervisor.
 *
 * @see panic_park
 */
void __attribute__((noreturn)) panic_stop(void)
{
        panic_printk("Stopping CPU %d (Cell: \"%s\")\n", this_cpu_id(),
                     this_cell()->config->name);

        if (phys_processor_id() == panic_cpu)
                panic_in_progress = 0;

        arch_panic_stop();
}

/**
 * Parks the current CPU on panic, allowing it to be restarted by resetting
 * the cell's CPU state.
 *
 * @note This service should be used when facing an error of a cell CPU, e.g.
 * a cell boundary violation.
 *
 * @see panic_stop
 */
void panic_park(void)
{
        struct cell *cell = this_cell();
        bool cell_failed = true;
        unsigned int cpu;

        panic_printk("Parking CPU %d (Cell: \"%s\")\n", this_cpu_id(),
                     cell->config->name);

        this_cpu_data()->failed = true;
        for_each_cpu(cpu, cell->cpu_set)
                if (!per_cpu(cpu)->failed) {
                        cell_failed = false;
                        break;
                }
        if (cell_failed)
                cell->comm_page.comm_region.cell_state = JAILHOUSE_CELL_FAILED;

        arch_panic_park();

        if (phys_processor_id() == panic_cpu)
                panic_in_progress = 0;
}