/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013-2015
 * Copyright (c) Valentine Sinitsyn, 2014
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *  Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/firmware.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "jailhouse.h"

#include <jailhouse/header.h>
#include <jailhouse/hypercall.h>
#include <generated/version.h>
/* The hypervisor only supports 64-bit hosts. */
#ifdef CONFIG_X86_32
#error 64-bit kernel required!
#endif

/* The driver-side and hypervisor-side name-length limits must agree so
 * that cell lookup by name cannot truncate differently on either side. */
#if JAILHOUSE_CELL_ID_NAMELEN != JAILHOUSE_CELL_NAME_MAXLEN
# warning JAILHOUSE_CELL_ID_NAMELEN and JAILHOUSE_CELL_NAME_MAXLEN out of sync!
#endif
/* For compatibility with older kernel versions */
#include <linux/version.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0)
#define DEVICE_ATTR_RO(_name) \
	struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
#endif /* < 3.11 */

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)
/*
 * Pre-3.14 kernels do not export kobj_sysfs_ops; provide equivalent
 * dispatchers that forward sysfs show/store to the embedded
 * kobj_attribute callbacks.
 */
static ssize_t kobj_attr_show(struct kobject *kobj, struct attribute *attr,
			      char *buf)
{
	struct kobj_attribute *kattr;
	ssize_t ret = -EIO;

	kattr = container_of(attr, struct kobj_attribute, attr);
	if (kattr->show)
		ret = kattr->show(kobj, kattr, buf);
	return ret;
}

static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t count)
{
	struct kobj_attribute *kattr;
	ssize_t ret = -EIO;

	kattr = container_of(attr, struct kobj_attribute, attr);
	if (kattr->store)
		ret = kattr->store(kobj, kattr, buf, count);
	return ret;
}

static const struct sysfs_ops cell_sysfs_ops = {
	.show	= kobj_attr_show,
	.store	= kobj_attr_store,
};
#define kobj_sysfs_ops cell_sysfs_ops
#endif /* < 3.14 */

/* End of compatibility section - remove as version become obsolete */
91 #define JAILHOUSE_AMD_FW_NAME "jailhouse-amd.bin"
92 #define JAILHOUSE_INTEL_FW_NAME "jailhouse-intel.bin"
94 #define JAILHOUSE_FW_NAME "jailhouse.bin"
97 MODULE_DESCRIPTION("Management driver for Jailhouse partitioning hypervisor");
98 MODULE_LICENSE("GPL");
100 MODULE_FIRMWARE(JAILHOUSE_AMD_FW_NAME);
101 MODULE_FIRMWARE(JAILHOUSE_INTEL_FW_NAME);
103 MODULE_FIRMWARE(JAILHOUSE_FW_NAME);
105 MODULE_VERSION(JAILHOUSE_VERSION);
107 static struct device *jailhouse_dev;
108 static DEFINE_MUTEX(lock);
110 static void *hypervisor_mem;
111 static unsigned long hv_core_and_percpu_size;
112 static cpumask_t offlined_cpus;
113 static atomic_t call_done;
114 static int error_code;
115 static LIST_HEAD(cells);
116 static struct cell *root_cell;
117 static struct kobject *cells_dir;
#ifdef CONFIG_X86
/* Selects vmcall (Intel) vs. vmmcall (AMD) for the hypercall macros. */
bool jailhouse_use_vmcall;

static void init_hypercall(void)
{
	jailhouse_use_vmcall = boot_cpu_has(X86_FEATURE_VMX);
}
#else /* !CONFIG_X86 */
static void init_hypercall(void)
{
}
#endif
132 struct jailhouse_cpu_stats_attr {
133 struct kobj_attribute kattr;
137 static ssize_t stats_show(struct kobject *kobj, struct kobj_attribute *attr,
140 struct jailhouse_cpu_stats_attr *stats_attr =
141 container_of(attr, struct jailhouse_cpu_stats_attr, kattr);
142 unsigned int code = JAILHOUSE_CPU_INFO_STAT_BASE + stats_attr->code;
143 struct cell *cell = container_of(kobj, struct cell, kobj);
144 unsigned long sum = 0;
148 for_each_cpu(cpu, &cell->cpus_assigned) {
149 value = jailhouse_call_arg2(JAILHOUSE_HC_CPU_GET_INFO, cpu,
155 return sprintf(buffer, "%lu\n", sum);
158 #define JAILHOUSE_CPU_STATS_ATTR(_name, _code) \
159 static struct jailhouse_cpu_stats_attr _name##_attr = { \
160 .kattr = __ATTR(_name, S_IRUGO, stats_show, NULL), \
164 JAILHOUSE_CPU_STATS_ATTR(vmexits_total, JAILHOUSE_CPU_STAT_VMEXITS_TOTAL);
165 JAILHOUSE_CPU_STATS_ATTR(vmexits_mmio, JAILHOUSE_CPU_STAT_VMEXITS_MMIO);
166 JAILHOUSE_CPU_STATS_ATTR(vmexits_management,
167 JAILHOUSE_CPU_STAT_VMEXITS_MANAGEMENT);
168 JAILHOUSE_CPU_STATS_ATTR(vmexits_hypercall,
169 JAILHOUSE_CPU_STAT_VMEXITS_HYPERCALL);
171 JAILHOUSE_CPU_STATS_ATTR(vmexits_pio, JAILHOUSE_CPU_STAT_VMEXITS_PIO);
172 JAILHOUSE_CPU_STATS_ATTR(vmexits_xapic, JAILHOUSE_CPU_STAT_VMEXITS_XAPIC);
173 JAILHOUSE_CPU_STATS_ATTR(vmexits_cr, JAILHOUSE_CPU_STAT_VMEXITS_CR);
174 JAILHOUSE_CPU_STATS_ATTR(vmexits_msr, JAILHOUSE_CPU_STAT_VMEXITS_MSR);
175 JAILHOUSE_CPU_STATS_ATTR(vmexits_cpuid, JAILHOUSE_CPU_STAT_VMEXITS_CPUID);
176 JAILHOUSE_CPU_STATS_ATTR(vmexits_xsetbv, JAILHOUSE_CPU_STAT_VMEXITS_XSETBV);
177 #elif defined(CONFIG_ARM)
178 JAILHOUSE_CPU_STATS_ATTR(vmexits_maintenance, JAILHOUSE_CPU_STAT_VMEXITS_MAINTENANCE);
179 JAILHOUSE_CPU_STATS_ATTR(vmexits_virt_irq, JAILHOUSE_CPU_STAT_VMEXITS_VIRQ);
180 JAILHOUSE_CPU_STATS_ATTR(vmexits_virt_sgi, JAILHOUSE_CPU_STAT_VMEXITS_VSGI);
183 static struct attribute *no_attrs[] = {
184 &vmexits_total_attr.kattr.attr,
185 &vmexits_mmio_attr.kattr.attr,
186 &vmexits_management_attr.kattr.attr,
187 &vmexits_hypercall_attr.kattr.attr,
189 &vmexits_pio_attr.kattr.attr,
190 &vmexits_xapic_attr.kattr.attr,
191 &vmexits_cr_attr.kattr.attr,
192 &vmexits_msr_attr.kattr.attr,
193 &vmexits_cpuid_attr.kattr.attr,
194 &vmexits_xsetbv_attr.kattr.attr,
195 #elif defined(CONFIG_ARM)
196 &vmexits_maintenance_attr.kattr.attr,
197 &vmexits_virt_irq_attr.kattr.attr,
198 &vmexits_virt_sgi_attr.kattr.attr,
203 static struct attribute_group stats_attr_group = {
208 static ssize_t id_show(struct kobject *kobj, struct kobj_attribute *attr,
211 struct cell *cell = container_of(kobj, struct cell, kobj);
213 return sprintf(buffer, "%u\n", cell->id);
216 static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
219 struct cell *cell = container_of(kobj, struct cell, kobj);
221 switch (jailhouse_call_arg1(JAILHOUSE_HC_CELL_GET_STATE, cell->id)) {
222 case JAILHOUSE_CELL_RUNNING:
223 return sprintf(buffer, "running\n");
224 case JAILHOUSE_CELL_RUNNING_LOCKED:
225 return sprintf(buffer, "running/locked\n");
226 case JAILHOUSE_CELL_SHUT_DOWN:
227 return sprintf(buffer, "shut down\n");
228 case JAILHOUSE_CELL_FAILED:
229 return sprintf(buffer, "failed\n");
231 return sprintf(buffer, "invalid\n");
235 static ssize_t cpus_assigned_show(struct kobject *kobj,
236 struct kobj_attribute *attr, char *buf)
238 struct cell *cell = container_of(kobj, struct cell, kobj);
241 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,0,0)
242 written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
243 cpumask_pr_args(&cell->cpus_assigned));
245 written = cpumask_scnprintf(buf, PAGE_SIZE, &cell->cpus_assigned);
246 written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
251 static ssize_t cpus_failed_show(struct kobject *kobj,
252 struct kobj_attribute *attr, char *buf)
254 struct cell *cell = container_of(kobj, struct cell, kobj);
255 cpumask_var_t cpus_failed;
259 if (!zalloc_cpumask_var(&cpus_failed, GFP_KERNEL))
262 for_each_cpu(cpu, &cell->cpus_assigned)
263 if (jailhouse_call_arg2(JAILHOUSE_HC_CPU_GET_INFO, cpu,
264 JAILHOUSE_CPU_INFO_STATE) ==
265 JAILHOUSE_CPU_FAILED)
266 cpu_set(cpu, *cpus_failed);
268 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,0,0)
269 written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
270 cpumask_pr_args(cpus_failed));
272 written = cpumask_scnprintf(buf, PAGE_SIZE, cpus_failed);
273 written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
276 free_cpumask_var(cpus_failed);
281 static struct kobj_attribute cell_id_attr = __ATTR_RO(id);
282 static struct kobj_attribute cell_state_attr = __ATTR_RO(state);
283 static struct kobj_attribute cell_cpus_assigned_attr =
284 __ATTR_RO(cpus_assigned);
285 static struct kobj_attribute cell_cpus_failed_attr = __ATTR_RO(cpus_failed);
287 static struct attribute *cell_attrs[] = {
289 &cell_state_attr.attr,
290 &cell_cpus_assigned_attr.attr,
291 &cell_cpus_failed_attr.attr,
295 static void cell_kobj_release(struct kobject *kobj)
297 struct cell *cell = container_of(kobj, struct cell, kobj);
299 jailhouse_pci_cell_cleanup(cell);
300 vfree(cell->memory_regions);
304 static struct kobj_type cell_type = {
305 .release = cell_kobj_release,
306 .sysfs_ops = &kobj_sysfs_ops,
307 .default_attrs = cell_attrs,
310 static struct cell *create_cell(const struct jailhouse_cell_desc *cell_desc)
315 cell = kzalloc(sizeof(*cell), GFP_KERNEL);
317 return ERR_PTR(-ENOMEM);
319 INIT_LIST_HEAD(&cell->entry);
321 bitmap_copy(cpumask_bits(&cell->cpus_assigned),
322 jailhouse_cell_cpu_set(cell_desc),
323 min(nr_cpumask_bits, (int)cell_desc->cpu_set_size * 8));
325 cell->num_memory_regions = cell_desc->num_memory_regions;
326 cell->memory_regions = vmalloc(sizeof(struct jailhouse_memory) *
327 cell->num_memory_regions);
328 if (!cell->memory_regions) {
330 return ERR_PTR(-ENOMEM);
333 memcpy(cell->memory_regions, jailhouse_cell_mem_regions(cell_desc),
334 sizeof(struct jailhouse_memory) * cell->num_memory_regions);
336 err = jailhouse_pci_cell_setup(cell, cell_desc);
338 vfree(cell->memory_regions);
343 err = kobject_init_and_add(&cell->kobj, &cell_type, cells_dir, "%s",
346 cell_kobj_release(&cell->kobj);
350 err = sysfs_create_group(&cell->kobj, &stats_attr_group);
352 kobject_put(&cell->kobj);
359 static void register_cell(struct cell *cell)
361 list_add_tail(&cell->entry, &cells);
362 kobject_uevent(&cell->kobj, KOBJ_ADD);
365 static struct cell *find_cell(struct jailhouse_cell_id *cell_id)
369 list_for_each_entry(cell, &cells, entry)
370 if (cell_id->id == cell->id ||
371 (cell_id->id == JAILHOUSE_CELL_ID_UNUSED &&
372 strcmp(kobject_name(&cell->kobj), cell_id->name) == 0))
377 static void delete_cell(struct cell *cell)
379 list_del(&cell->entry);
380 sysfs_remove_group(&cell->kobj, &stats_attr_group);
381 kobject_put(&cell->kobj);
384 static long get_max_cpus(u32 cpu_set_size,
385 const struct jailhouse_system __user *system_config)
388 (u8 __user *)jailhouse_cell_cpu_set(&system_config->root_cell);
389 unsigned int pos = cpu_set_size;
394 if (get_user(bitmap, cpu_set + pos))
396 max_cpu_id = fls(bitmap);
398 return pos * 8 + max_cpu_id;
403 static void *jailhouse_ioremap(phys_addr_t phys, unsigned long virt,
406 struct vm_struct *vma;
408 size = PAGE_ALIGN(size);
410 vma = __get_vm_area(size, VM_IOREMAP, virt,
411 virt + size + PAGE_SIZE);
413 vma = __get_vm_area(size, VM_IOREMAP, VMALLOC_START,
417 vma->phys_addr = phys;
419 if (ioremap_page_range((unsigned long)vma->addr,
420 (unsigned long)vma->addr + size, phys,
429 static void enter_hypervisor(void *info)
431 struct jailhouse_header *header = info;
432 unsigned int cpu = smp_processor_id();
435 if (cpu < header->max_cpus)
436 /* either returns 0 or the same error code across all CPUs */
437 err = header->entry(cpu);
444 #if defined(CONFIG_X86) && LINUX_VERSION_CODE >= KERNEL_VERSION(4,0,0)
445 /* on Intel, VMXE is now on - update the shadow */
449 atomic_inc(&call_done);
452 static inline const char * jailhouse_fw_name(void)
455 if (boot_cpu_has(X86_FEATURE_SVM))
456 return JAILHOUSE_AMD_FW_NAME;
457 if (boot_cpu_has(X86_FEATURE_VMX))
458 return JAILHOUSE_INTEL_FW_NAME;
461 return JAILHOUSE_FW_NAME;
465 static int jailhouse_enable(struct jailhouse_system __user *arg)
467 const struct firmware *hypervisor;
468 struct jailhouse_system config_header;
469 struct jailhouse_system *config;
470 struct jailhouse_memory *hv_mem = &config_header.hypervisor_memory;
471 struct jailhouse_header *header;
472 void __iomem *uart = NULL;
473 unsigned long config_size;
478 fw_name = jailhouse_fw_name();
480 pr_err("jailhouse: Missing or unsupported HVM technology\n");
484 if (copy_from_user(&config_header, arg, sizeof(config_header)))
486 config_header.root_cell.name[JAILHOUSE_CELL_NAME_MAXLEN] = 0;
488 max_cpus = get_max_cpus(config_header.root_cell.cpu_set_size, arg);
491 if (max_cpus > UINT_MAX)
494 if (mutex_lock_interruptible(&lock) != 0)
498 if (enabled || !try_module_get(THIS_MODULE))
501 err = request_firmware(&hypervisor, fw_name, jailhouse_dev);
503 pr_err("jailhouse: Missing hypervisor image %s\n", fw_name);
504 goto error_put_module;
507 header = (struct jailhouse_header *)hypervisor->data;
510 if (memcmp(header->signature, JAILHOUSE_SIGNATURE,
511 sizeof(header->signature)) != 0)
512 goto error_release_fw;
514 hv_core_and_percpu_size = PAGE_ALIGN(header->core_size) +
515 max_cpus * header->percpu_size;
516 config_size = jailhouse_system_config_size(&config_header);
517 if (hv_mem->size <= hv_core_and_percpu_size + config_size)
518 goto error_release_fw;
520 hypervisor_mem = jailhouse_ioremap(hv_mem->phys_start, JAILHOUSE_BASE,
522 if (!hypervisor_mem) {
523 pr_err("jailhouse: Unable to map RAM reserved for hypervisor "
524 "at %08lx\n", (unsigned long)hv_mem->phys_start);
525 goto error_release_fw;
528 memcpy(hypervisor_mem, hypervisor->data, hypervisor->size);
529 memset(hypervisor_mem + hypervisor->size, 0,
530 hv_mem->size - hypervisor->size);
532 header = (struct jailhouse_header *)hypervisor_mem;
533 header->max_cpus = max_cpus;
535 config = (struct jailhouse_system *)
536 (hypervisor_mem + hv_core_and_percpu_size);
537 if (copy_from_user(config, arg, config_size)) {
542 if (config->debug_uart.flags & JAILHOUSE_MEM_IO) {
543 uart = ioremap(config->debug_uart.phys_start,
544 config->debug_uart.size);
547 pr_err("jailhouse: Unable to map hypervisor UART at "
549 (unsigned long)config->debug_uart.phys_start);
552 header->debug_uart_base = (void *)uart;
555 root_cell = create_cell(&config->root_cell);
556 if (IS_ERR(root_cell)) {
557 err = PTR_ERR(root_cell);
561 cpumask_and(&root_cell->cpus_assigned, &root_cell->cpus_assigned,
568 header->online_cpus = num_online_cpus();
570 atomic_set(&call_done, 0);
571 on_each_cpu(enter_hypervisor, header, 0);
572 while (atomic_read(&call_done) != num_online_cpus())
579 goto error_free_cell;
582 jailhouse_pci_do_all_devices(root_cell, JAILHOUSE_PCI_TYPE_IVSHMEM,
583 JAILHOUSE_PCI_ACTION_ADD);
588 release_firmware(hypervisor);
592 register_cell(root_cell);
596 pr_info("The Jailhouse is opening.\n");
601 delete_cell(root_cell);
604 vunmap(hypervisor_mem);
609 release_firmware(hypervisor);
612 module_put(THIS_MODULE);
619 static void leave_hypervisor(void *info)
625 /* Touch each hypervisor page we may need during the switch so that
626 * the active mm definitely contains all mappings. At least x86 does
627 * not support taking any faults while switching worlds. */
628 for (page = hypervisor_mem, size = hv_core_and_percpu_size; size > 0;
629 size -= PAGE_SIZE, page += PAGE_SIZE)
632 /* either returns 0 or the same error code across all CPUs */
633 err = jailhouse_call(JAILHOUSE_HC_DISABLE);
637 #if defined(CONFIG_X86) && LINUX_VERSION_CODE >= KERNEL_VERSION(4,0,0)
638 /* on Intel, VMXE is now off - update the shadow */
642 atomic_inc(&call_done);
645 static int jailhouse_disable(void)
647 struct cell *cell, *tmp;
651 if (mutex_lock_interruptible(&lock) != 0)
663 atomic_set(&call_done, 0);
664 on_each_cpu(leave_hypervisor, NULL, 0);
665 while (atomic_read(&call_done) != num_online_cpus())
674 vunmap(hypervisor_mem);
676 for_each_cpu(cpu, &offlined_cpus) {
677 if (cpu_up(cpu) != 0)
678 pr_err("Jailhouse: failed to bring CPU %d back "
680 cpu_clear(cpu, offlined_cpus);
683 jailhouse_pci_do_all_devices(root_cell, JAILHOUSE_PCI_TYPE_IVSHMEM,
684 JAILHOUSE_PCI_ACTION_DEL);
686 list_for_each_entry_safe(cell, tmp, &cells, entry)
689 module_put(THIS_MODULE);
691 pr_info("The Jailhouse was closed.\n");
699 static int jailhouse_cell_create(struct jailhouse_cell_create __user *arg)
701 struct jailhouse_cell_create cell_params;
702 struct jailhouse_cell_desc *config;
703 struct jailhouse_cell_id cell_id;
708 if (copy_from_user(&cell_params, arg, sizeof(cell_params)))
711 config = kmalloc(cell_params.config_size, GFP_KERNEL | GFP_DMA);
715 if (copy_from_user(config,
716 (void *)(unsigned long)cell_params.config_address,
717 cell_params.config_size)) {
719 goto kfree_config_out;
721 config->name[JAILHOUSE_CELL_NAME_MAXLEN] = 0;
723 if (mutex_lock_interruptible(&lock) != 0) {
725 goto kfree_config_out;
733 cell_id.id = JAILHOUSE_CELL_ID_UNUSED;
734 memcpy(cell_id.name, config->name, sizeof(cell_id.name));
735 if (find_cell(&cell_id) != NULL) {
740 cell = create_cell(config);
746 if (!cpumask_subset(&cell->cpus_assigned, &root_cell->cpus_assigned)) {
748 goto error_cell_delete;
751 for_each_cpu(cpu, &cell->cpus_assigned) {
752 if (cpu_online(cpu)) {
755 goto error_cpu_online;
756 cpu_set(cpu, offlined_cpus);
758 cpu_clear(cpu, root_cell->cpus_assigned);
761 id = jailhouse_call_arg1(JAILHOUSE_HC_CELL_CREATE, __pa(config));
764 goto error_cpu_online;
770 pr_info("Created Jailhouse cell \"%s\"\n", config->name);
781 for_each_cpu(cpu, &cell->cpus_assigned) {
782 if (!cpu_online(cpu) && cpu_up(cpu) == 0)
783 cpu_clear(cpu, offlined_cpus);
784 cpu_set(cpu, root_cell->cpus_assigned);
792 static int cell_management_prologue(struct jailhouse_cell_id *cell_id,
793 struct cell **cell_ptr)
795 cell_id->name[JAILHOUSE_CELL_ID_NAMELEN] = 0;
797 if (mutex_lock_interruptible(&lock) != 0)
805 *cell_ptr = find_cell(cell_id);
806 if (*cell_ptr == NULL) {
813 #define MEM_REQ_FLAGS (JAILHOUSE_MEM_WRITE | JAILHOUSE_MEM_LOADABLE)
815 static int load_image(struct cell *cell,
816 struct jailhouse_preload_image __user *uimage)
818 struct jailhouse_preload_image image;
819 const struct jailhouse_memory *mem;
820 unsigned int regions;
825 if (copy_from_user(&image, uimage, sizeof(image)))
828 mem = cell->memory_regions;
829 for (regions = cell->num_memory_regions; regions > 0; regions--) {
830 image_offset = image.target_address - mem->virt_start;
831 if (image.target_address >= mem->virt_start &&
832 image_offset < mem->size) {
833 if (image.size > mem->size - image_offset ||
834 (mem->flags & MEM_REQ_FLAGS) != MEM_REQ_FLAGS)
843 image_mem = jailhouse_ioremap(mem->phys_start + image_offset, 0,
846 pr_err("jailhouse: Unable to map cell RAM at %08llx "
847 "for image loading\n",
848 (unsigned long long)(mem->phys_start + image_offset));
852 if (copy_from_user(image_mem,
853 (void *)(unsigned long)image.source_address,
862 static int jailhouse_cell_load(struct jailhouse_cell_load __user *arg)
864 struct jailhouse_preload_image __user *image = arg->image;
865 struct jailhouse_cell_load cell_load;
870 if (copy_from_user(&cell_load, arg, sizeof(cell_load)))
873 err = cell_management_prologue(&cell_load.cell_id, &cell);
877 err = jailhouse_call_arg1(JAILHOUSE_HC_CELL_SET_LOADABLE, cell->id);
881 for (n = cell_load.num_preload_images; n > 0; n--, image++) {
882 err = load_image(cell, image);
893 static int jailhouse_cell_start(const char __user *arg)
895 struct jailhouse_cell_id cell_id;
899 if (copy_from_user(&cell_id, arg, sizeof(cell_id)))
902 err = cell_management_prologue(&cell_id, &cell);
906 err = jailhouse_call_arg1(JAILHOUSE_HC_CELL_START, cell->id);
913 static int jailhouse_cell_destroy(const char __user *arg)
915 struct jailhouse_cell_id cell_id;
920 if (copy_from_user(&cell_id, arg, sizeof(cell_id)))
923 err = cell_management_prologue(&cell_id, &cell);
927 err = jailhouse_call_arg1(JAILHOUSE_HC_CELL_DESTROY, cell->id);
931 for_each_cpu(cpu, &cell->cpus_assigned) {
932 if (cpu_isset(cpu, offlined_cpus)) {
933 if (cpu_up(cpu) != 0)
934 pr_err("Jailhouse: failed to bring CPU %d "
935 "back online\n", cpu);
936 cpu_clear(cpu, offlined_cpus);
938 cpu_set(cpu, root_cell->cpus_assigned);
941 pr_info("Destroyed Jailhouse cell \"%s\"\n",
942 kobject_name(&cell->kobj));
952 static long jailhouse_ioctl(struct file *file, unsigned int ioctl,
958 case JAILHOUSE_ENABLE:
959 err = jailhouse_enable(
960 (struct jailhouse_system __user *)arg);
962 case JAILHOUSE_DISABLE:
963 err = jailhouse_disable();
965 case JAILHOUSE_CELL_CREATE:
966 err = jailhouse_cell_create(
967 (struct jailhouse_cell_create __user *)arg);
969 case JAILHOUSE_CELL_LOAD:
970 err = jailhouse_cell_load(
971 (struct jailhouse_cell_load __user *)arg);
973 case JAILHOUSE_CELL_START:
974 err = jailhouse_cell_start((const char __user *)arg);
976 case JAILHOUSE_CELL_DESTROY:
977 err = jailhouse_cell_destroy((const char __user *)arg);
987 static const struct file_operations jailhouse_fops = {
988 .owner = THIS_MODULE,
989 .unlocked_ioctl = jailhouse_ioctl,
990 .compat_ioctl = jailhouse_ioctl,
991 .llseek = noop_llseek,
994 static struct miscdevice jailhouse_misc_dev = {
995 .minor = MISC_DYNAMIC_MINOR,
997 .fops = &jailhouse_fops,
1000 static int jailhouse_shutdown_notify(struct notifier_block *unused1,
1001 unsigned long unused2, void *unused3)
1005 err = jailhouse_disable();
1006 if (err && err != -EINVAL)
1007 pr_emerg("jailhouse: ordered shutdown failed!\n");
1012 static struct notifier_block jailhouse_shutdown_nb = {
1013 .notifier_call = jailhouse_shutdown_notify,
1016 static ssize_t enabled_show(struct device *dev, struct device_attribute *attr,
1019 return sprintf(buffer, "%d\n", enabled);
1022 static ssize_t info_show(struct device *dev, char *buffer, unsigned int type)
1027 if (mutex_lock_interruptible(&lock) != 0)
1031 val = jailhouse_call_arg1(JAILHOUSE_HC_HYPERVISOR_GET_INFO,
1034 result = sprintf(buffer, "%ld\n", val);
1038 mutex_unlock(&lock);
1042 static ssize_t mem_pool_size_show(struct device *dev,
1043 struct device_attribute *attr, char *buffer)
1045 return info_show(dev, buffer, JAILHOUSE_INFO_MEM_POOL_SIZE);
1048 static ssize_t mem_pool_used_show(struct device *dev,
1049 struct device_attribute *attr, char *buffer)
1051 return info_show(dev, buffer, JAILHOUSE_INFO_MEM_POOL_USED);
1054 static ssize_t remap_pool_size_show(struct device *dev,
1055 struct device_attribute *attr,
1058 return info_show(dev, buffer, JAILHOUSE_INFO_REMAP_POOL_SIZE);
1061 static ssize_t remap_pool_used_show(struct device *dev,
1062 struct device_attribute *attr,
1065 return info_show(dev, buffer, JAILHOUSE_INFO_REMAP_POOL_USED);
1068 static DEVICE_ATTR_RO(enabled);
1069 static DEVICE_ATTR_RO(mem_pool_size);
1070 static DEVICE_ATTR_RO(mem_pool_used);
1071 static DEVICE_ATTR_RO(remap_pool_size);
1072 static DEVICE_ATTR_RO(remap_pool_used);
1074 static struct attribute *jailhouse_sysfs_entries[] = {
1075 &dev_attr_enabled.attr,
1076 &dev_attr_mem_pool_size.attr,
1077 &dev_attr_mem_pool_used.attr,
1078 &dev_attr_remap_pool_size.attr,
1079 &dev_attr_remap_pool_used.attr,
1083 static struct attribute_group jailhouse_attribute_group = {
1085 .attrs = jailhouse_sysfs_entries,
1088 static int __init jailhouse_init(void)
1092 jailhouse_dev = root_device_register("jailhouse");
1093 if (IS_ERR(jailhouse_dev))
1094 return PTR_ERR(jailhouse_dev);
1096 err = sysfs_create_group(&jailhouse_dev->kobj,
1097 &jailhouse_attribute_group);
1101 cells_dir = kobject_create_and_add("cells", &jailhouse_dev->kobj);
1107 err = misc_register(&jailhouse_misc_dev);
1109 goto remove_cells_dir;
1111 register_reboot_notifier(&jailhouse_shutdown_nb);
1118 kobject_put(cells_dir);
1121 sysfs_remove_group(&jailhouse_dev->kobj, &jailhouse_attribute_group);
1124 root_device_unregister(jailhouse_dev);
1128 static void __exit jailhouse_exit(void)
1130 unregister_reboot_notifier(&jailhouse_shutdown_nb);
1131 misc_deregister(&jailhouse_misc_dev);
1132 kobject_put(cells_dir);
1133 sysfs_remove_group(&jailhouse_dev->kobj, &jailhouse_attribute_group);
1134 root_device_unregister(jailhouse_dev);
1137 module_init(jailhouse_init);
1138 module_exit(jailhouse_exit);