2 * Jailhouse, a Linux-based partitioning hypervisor
4 * Copyright (c) Siemens AG, 2013, 2014
5 * Copyright (c) Valentine Sinitsyn, 2014
8 * Jan Kiszka <jan.kiszka@siemens.com>
9 * Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
11 * This work is licensed under the terms of the GNU GPL, version 2. See
12 * the COPYING file in the top-level directory.
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/cpu.h>
18 #include <linux/device.h>
20 #include <linux/miscdevice.h>
21 #include <linux/firmware.h>
23 #include <linux/slab.h>
24 #include <linux/smp.h>
25 #include <linux/uaccess.h>
26 #include <linux/reboot.h>
27 #include <linux/vmalloc.h>
29 #include <linux/pci.h>
31 #include <asm/cacheflush.h>
32 #include <asm/tlbflush.h>
34 #include "jailhouse.h"
35 #include <jailhouse/cell-config.h>
36 #include <jailhouse/header.h>
37 #include <jailhouse/hypercall.h>
38 #include <jailhouse/version.h>
41 #error 64-bit kernel required!
44 #if JAILHOUSE_CELL_ID_NAMELEN != JAILHOUSE_CELL_NAME_MAXLEN
45 # warning JAILHOUSE_CELL_ID_NAMELEN and JAILHOUSE_CELL_NAME_MAXLEN out of sync!
48 /* For compatibility with older kernel versions */
49 #include <linux/version.h>
/*
 * Compatibility shims for older kernels.
 * NOTE(review): this extraction is missing interior lines throughout the
 * file (braces, #endif markers, declarations); do not edit code without
 * consulting the complete source.
 */
51 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0)
/* Pre-3.11 kernels lack the DEVICE_ATTR_RO() convenience macro. */
52 #define DEVICE_ATTR_RO(_name) \
53 struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
/* Pre-3.14 kernels lack kobj_sysfs_ops: provide show/store wrappers that
 * forward to the kobj_attribute callbacks recovered via container_of(). */
56 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)
57 static ssize_t kobj_attr_show(struct kobject *kobj, struct attribute *attr,
60 struct kobj_attribute *kattr;
63 kattr = container_of(attr, struct kobj_attribute, attr);
65 ret = kattr->show(kobj, kattr, buf);
69 static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
70 const char *buf, size_t count)
72 struct kobj_attribute *kattr;
75 kattr = container_of(attr, struct kobj_attribute, attr);
77 ret = kattr->store(kobj, kattr, buf, count);
/* sysfs ops table used for cell kobjects on old kernels; aliased to the
 * name newer code expects. */
81 static const struct sysfs_ops cell_sysfs_ops = {
82 .show = kobj_attr_show,
83 .store = kobj_attr_store,
85 #define kobj_sysfs_ops cell_sysfs_ops
87 /* End of compatibility section - remove as version become obsolete */
/* Hypervisor firmware image names: per-vendor on x86 (AMD/Intel selected at
 * runtime), a single generic image otherwise. */
90 #define JAILHOUSE_AMD_FW_NAME "jailhouse-amd.bin"
91 #define JAILHOUSE_INTEL_FW_NAME "jailhouse-intel.bin"
93 #define JAILHOUSE_FW_NAME "jailhouse.bin"
/*
 * Driver-side representation of a Jailhouse cell: list linkage, assigned
 * CPUs, and copies of the config's memory regions and PCI devices.
 * NOTE(review): the struct header, kobj member, id and num_pci_devices
 * fields appear to be missing from this extraction.
 */
98 struct list_head entry;
100 cpumask_t cpus_assigned;
101 u32 num_memory_regions;
103 struct jailhouse_memory *memory_regions;
104 struct jailhouse_pci_device *pci_devices;
/* Module metadata and firmware declarations (for module loaders/initrd). */
107 MODULE_DESCRIPTION("Loader for Jailhouse partitioning hypervisor");
108 MODULE_LICENSE("GPL");
110 MODULE_FIRMWARE(JAILHOUSE_AMD_FW_NAME);
111 MODULE_FIRMWARE(JAILHOUSE_INTEL_FW_NAME);
113 MODULE_FIRMWARE(JAILHOUSE_FW_NAME);
115 MODULE_VERSION(JAILHOUSE_VERSION);
/* Global driver state, all protected by 'lock' unless noted. */
117 static struct device *jailhouse_dev;
118 static DEFINE_MUTEX(lock);
/* Virtual mapping of the hypervisor's reserved physical memory. */
120 static void *hypervisor_mem;
121 static unsigned long hv_core_and_percpu_size;
/* CPUs taken offline because they were handed to non-root cells. */
122 static cpumask_t offlined_cpus;
/* Rendezvous counter for on_each_cpu() enter/leave calls. */
123 static atomic_t call_done;
124 static int error_code;
125 static LIST_HEAD(cells);
126 static struct cell *root_cell;
/* sysfs directory /sys/devices/jailhouse/cells. */
127 static struct kobject *cells_dir;
/* True when hypercalls must use VMCALL (Intel VMX) instead of VMMCALL. */
130 bool jailhouse_use_vmcall;
/*
 * Select the hypercall instruction flavor: on x86, VMCALL when the CPU
 * supports VMX, VMMCALL otherwise; a no-op stub on other architectures.
 * NOTE(review): the surrounding #ifdef CONFIG_X86 and braces are missing
 * from this extraction.
 */
132 static void init_hypercall(void)
134 jailhouse_use_vmcall = boot_cpu_has(X86_FEATURE_VMX);
136 #else /* !CONFIG_X86 */
137 static void init_hypercall(void)
/* PCI actions used by jailhouse_pci_do_all_devices() below. */
142 enum { JAILHOUSE_PCI_ACTION_ADD, JAILHOUSE_PCI_ACTION_DEL };
/*
 * Register a cell-config PCI device (e.g. an ivshmem device) with the
 * Linux PCI core by rescanning its slot and assigning resources.
 */
145 static void jailhouse_pci_add_device(const struct jailhouse_pci_device *dev)
150 bus = pci_find_bus(dev->domain, PCI_BUS_NUM(dev->bdf));
152 num = pci_scan_slot(bus, dev->bdf & 0xff);
154 pci_lock_rescan_remove();
155 pci_bus_assign_resources(bus);
156 pci_bus_add_devices(bus);
157 pci_unlock_rescan_remove();
/*
 * Remove a previously added PCI device from Linux.
 * NOTE(review): pci_get_bus_and_slot() returns a referenced pci_dev; the
 * corresponding pci_dev_put() is presumably on a line missing here.
 */
162 static void jailhouse_pci_remove_device(const struct jailhouse_pci_device *dev)
164 struct pci_dev *l_dev;
166 l_dev = pci_get_bus_and_slot(PCI_BUS_NUM(dev->bdf), dev->bdf & 0xff);
168 pci_stop_and_remove_bus_device_locked(l_dev);
/*
 * Apply 'action' (add/remove) to every PCI device of the given type in the
 * cell's device list. Stubbed out when CONFIG_PCI is disabled.
 */
171 static void jailhouse_pci_do_all_devices(struct cell *cell, unsigned int type,
175 const struct jailhouse_pci_device *dev;
177 dev = cell->pci_devices;
178 for (n = cell->num_pci_devices; n > 0; n--) {
179 if (dev->type == type) {
180 if (action == JAILHOUSE_PCI_ACTION_ADD)
181 jailhouse_pci_add_device(dev);
182 else if (action == JAILHOUSE_PCI_ACTION_DEL)
183 jailhouse_pci_remove_device(dev);
188 #else /* CONFIG_PCI */
189 static void jailhouse_pci_do_all_devices(struct cell *cell, unsigned int type,
193 #endif /* CONFIG_PCI */
/* Per-statistic sysfs attribute: kobj_attribute plus the hypervisor stat
 * code it queries. */
195 struct jailhouse_cpu_stats_attr {
196 struct kobj_attribute kattr;
/*
 * Show one cell statistic: sum the per-CPU counter (queried via the
 * CPU_GET_INFO hypercall) over all CPUs assigned to the cell.
 * NOTE(review): error handling for negative hypercall returns appears to
 * be on lines missing from this extraction.
 */
200 static ssize_t stats_show(struct kobject *kobj, struct kobj_attribute *attr,
203 struct jailhouse_cpu_stats_attr *stats_attr =
204 container_of(attr, struct jailhouse_cpu_stats_attr, kattr);
205 unsigned int code = JAILHOUSE_CPU_INFO_STAT_BASE + stats_attr->code;
206 struct cell *cell = container_of(kobj, struct cell, kobj);
207 unsigned long sum = 0;
211 for_each_cpu(cpu, &cell->cpus_assigned) {
212 value = jailhouse_call_arg2(JAILHOUSE_HC_CPU_GET_INFO, cpu,
218 return sprintf(buffer, "%lu\n", sum);
/* Declare one read-only stats attribute bound to a hypervisor stat code. */
221 #define JAILHOUSE_CPU_STATS_ATTR(_name, _code) \
222 static struct jailhouse_cpu_stats_attr _name##_attr = { \
223 .kattr = __ATTR(_name, S_IRUGO, stats_show, NULL), \
/* Common statistics, then architecture-specific ones (x86 vs. ARM). */
227 JAILHOUSE_CPU_STATS_ATTR(vmexits_total, JAILHOUSE_CPU_STAT_VMEXITS_TOTAL);
228 JAILHOUSE_CPU_STATS_ATTR(vmexits_mmio, JAILHOUSE_CPU_STAT_VMEXITS_MMIO);
229 JAILHOUSE_CPU_STATS_ATTR(vmexits_management,
230 JAILHOUSE_CPU_STAT_VMEXITS_MANAGEMENT);
231 JAILHOUSE_CPU_STATS_ATTR(vmexits_hypercall,
232 JAILHOUSE_CPU_STAT_VMEXITS_HYPERCALL);
234 JAILHOUSE_CPU_STATS_ATTR(vmexits_pio, JAILHOUSE_CPU_STAT_VMEXITS_PIO);
235 JAILHOUSE_CPU_STATS_ATTR(vmexits_xapic, JAILHOUSE_CPU_STAT_VMEXITS_XAPIC);
236 JAILHOUSE_CPU_STATS_ATTR(vmexits_cr, JAILHOUSE_CPU_STAT_VMEXITS_CR);
237 JAILHOUSE_CPU_STATS_ATTR(vmexits_msr, JAILHOUSE_CPU_STAT_VMEXITS_MSR);
238 JAILHOUSE_CPU_STATS_ATTR(vmexits_cpuid, JAILHOUSE_CPU_STAT_VMEXITS_CPUID);
239 JAILHOUSE_CPU_STATS_ATTR(vmexits_xsetbv, JAILHOUSE_CPU_STAT_VMEXITS_XSETBV);
240 #elif defined(CONFIG_ARM)
241 JAILHOUSE_CPU_STATS_ATTR(vmexits_maintenance, JAILHOUSE_CPU_STAT_VMEXITS_MAINTENANCE);
242 JAILHOUSE_CPU_STATS_ATTR(vmexits_virt_irq, JAILHOUSE_CPU_STAT_VMEXITS_VIRQ);
243 JAILHOUSE_CPU_STATS_ATTR(vmexits_virt_sgi, JAILHOUSE_CPU_STAT_VMEXITS_VSGI);
/*
 * Attribute list for the per-cell "statistics" sysfs group; the
 * architecture-specific entries mirror the declarations above.
 * NOTE(review): the surrounding #if defined(CONFIG_X86) guards and the
 * terminating NULL entry are missing from this extraction.
 */
246 static struct attribute *no_attrs[] = {
247 &vmexits_total_attr.kattr.attr,
248 &vmexits_mmio_attr.kattr.attr,
249 &vmexits_management_attr.kattr.attr,
250 &vmexits_hypercall_attr.kattr.attr,
252 &vmexits_pio_attr.kattr.attr,
253 &vmexits_xapic_attr.kattr.attr,
254 &vmexits_cr_attr.kattr.attr,
255 &vmexits_msr_attr.kattr.attr,
256 &vmexits_cpuid_attr.kattr.attr,
257 &vmexits_xsetbv_attr.kattr.attr,
258 #elif defined(CONFIG_ARM)
259 &vmexits_maintenance_attr.kattr.attr,
260 &vmexits_virt_irq_attr.kattr.attr,
261 &vmexits_virt_sgi_attr.kattr.attr,
/* Group published under each cell's kobject. */
266 static struct attribute_group stats_attr_group = {
/* sysfs: print the cell's numeric id. */
271 static ssize_t id_show(struct kobject *kobj, struct kobj_attribute *attr,
274 struct cell *cell = container_of(kobj, struct cell, kobj);
276 return sprintf(buffer, "%u\n", cell->id);
/*
 * sysfs: print the cell state as reported by the CELL_GET_STATE
 * hypercall; unknown values render as "invalid".
 */
279 static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
282 struct cell *cell = container_of(kobj, struct cell, kobj);
284 switch (jailhouse_call_arg1(JAILHOUSE_HC_CELL_GET_STATE, cell->id)) {
285 case JAILHOUSE_CELL_RUNNING:
286 return sprintf(buffer, "running\n");
287 case JAILHOUSE_CELL_RUNNING_LOCKED:
288 return sprintf(buffer, "running/locked\n");
289 case JAILHOUSE_CELL_SHUT_DOWN:
290 return sprintf(buffer, "shut down\n");
291 case JAILHOUSE_CELL_FAILED:
292 return sprintf(buffer, "failed\n");
/* default: fall through to the invalid-state output */
294 return sprintf(buffer, "invalid\n");
/*
 * sysfs: print the cell's assigned-CPU mask. Kernels >= 4.0 use the %*pb
 * cpumask format specifier; older ones use cpumask_scnprintf() plus an
 * explicit newline.
 */
298 static ssize_t cpus_assigned_show(struct kobject *kobj,
299 struct kobj_attribute *attr, char *buf)
301 struct cell *cell = container_of(kobj, struct cell, kobj);
304 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,0,0)
305 written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
306 cpumask_pr_args(&cell->cpus_assigned));
308 written = cpumask_scnprintf(buf, PAGE_SIZE, &cell->cpus_assigned);
309 written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
/*
 * sysfs: build and print the mask of assigned CPUs whose hypervisor state
 * is JAILHOUSE_CPU_FAILED. Allocates a temporary cpumask and frees it
 * before returning.
 */
314 static ssize_t cpus_failed_show(struct kobject *kobj,
315 struct kobj_attribute *attr, char *buf)
317 struct cell *cell = container_of(kobj, struct cell, kobj);
318 cpumask_var_t cpus_failed;
/* -ENOMEM return on allocation failure (line presumably elided here). */
322 if (!zalloc_cpumask_var(&cpus_failed, GFP_KERNEL))
325 for_each_cpu(cpu, &cell->cpus_assigned)
326 if (jailhouse_call_arg2(JAILHOUSE_HC_CPU_GET_INFO, cpu,
327 JAILHOUSE_CPU_INFO_STATE) ==
328 JAILHOUSE_CPU_FAILED)
329 cpu_set(cpu, *cpus_failed)
331 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,0,0)
332 written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
333 cpumask_pr_args(cpus_failed));
335 written = cpumask_scnprintf(buf, PAGE_SIZE, cpus_failed);
336 written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
339 free_cpumask_var(cpus_failed);
/* Read-only per-cell attributes: id, state, cpus_assigned, cpus_failed. */
344 static struct kobj_attribute cell_id_attr = __ATTR_RO(id);
345 static struct kobj_attribute cell_state_attr = __ATTR_RO(state);
346 static struct kobj_attribute cell_cpus_assigned_attr =
347 __ATTR_RO(cpus_assigned);
348 static struct kobj_attribute cell_cpus_failed_attr = __ATTR_RO(cpus_failed);
/* Default attribute list attached to every cell kobject (via cell_type).
 * NOTE(review): the &cell_id_attr.attr entry and NULL terminator appear
 * to be missing from this extraction. */
350 static struct attribute *cell_attrs[] = {
352 &cell_state_attr.attr,
353 &cell_cpus_assigned_attr.attr,
354 &cell_cpus_failed_attr.attr,
/*
 * kobject release callback: frees the cell's copied memory-region and PCI
 * tables (the struct cell itself is presumably freed on an elided line).
 */
358 static void cell_kobj_release(struct kobject *kobj)
360 struct cell *cell = container_of(kobj, struct cell, kobj);
362 vfree(cell->memory_regions);
363 vfree(cell->pci_devices);
/* kobj_type tying release, sysfs ops and default attributes together. */
367 static struct kobj_type cell_type = {
368 .release = cell_kobj_release,
369 .sysfs_ops = &kobj_sysfs_ops,
370 .default_attrs = cell_attrs,
/*
 * Allocate a struct cell from a cell descriptor: copy the CPU set, the
 * memory-region table and (if any) the PCI device table, then create the
 * cell's sysfs kobject and attach the statistics group. Returns the cell
 * or an ERR_PTR on failure; on kobject errors the release callback (or
 * kobject_put) cleans up the copied tables.
 */
373 static struct cell *create_cell(const struct jailhouse_cell_desc *cell_desc)
378 cell = kzalloc(sizeof(*cell), GFP_KERNEL);
380 return ERR_PTR(-ENOMEM);
382 INIT_LIST_HEAD(&cell->entry);
/* Copy at most nr_cpumask_bits bits from the config's CPU set. */
384 bitmap_copy(cpumask_bits(&cell->cpus_assigned),
385 jailhouse_cell_cpu_set(cell_desc),
386 min(nr_cpumask_bits, (int)cell_desc->cpu_set_size * 8));
388 cell->num_memory_regions = cell_desc->num_memory_regions;
389 cell->memory_regions = vmalloc(sizeof(struct jailhouse_memory) *
390 cell->num_memory_regions);
391 if (!cell->memory_regions) {
393 return ERR_PTR(-ENOMEM);
396 memcpy(cell->memory_regions, jailhouse_cell_mem_regions(cell_desc),
397 sizeof(struct jailhouse_memory) * cell->num_memory_regions);
399 cell->num_pci_devices = cell_desc->num_pci_devices;
400 cell->pci_devices = NULL;
402 if (cell->num_pci_devices > 0) {
404 vmalloc(sizeof(struct jailhouse_pci_device) *
405 cell->num_pci_devices);
406 if (!cell->pci_devices) {
407 vfree(cell->memory_regions);
409 return ERR_PTR(-ENOMEM);
412 memcpy(cell->pci_devices,
413 jailhouse_cell_pci_devices(cell_desc),
414 sizeof(struct jailhouse_pci_device) *
415 cell->num_pci_devices);
/* Cell appears under /sys/devices/jailhouse/cells/<name>. */
418 err = kobject_init_and_add(&cell->kobj, &cell_type, cells_dir, "%s",
421 cell_kobj_release(&cell->kobj);
425 err = sysfs_create_group(&cell->kobj, &stats_attr_group);
427 kobject_put(&cell->kobj);
/*
 * Publish a fully created cell: add it to the global list and emit a
 * KOBJ_ADD uevent. Caller must hold 'lock'.
 */
434 static void register_cell(struct cell *cell)
436 list_add_tail(&cell->entry, &cells);
437 kobject_uevent(&cell->kobj, KOBJ_ADD);
/*
 * Look up a cell by numeric id or, when the id is
 * JAILHOUSE_CELL_ID_UNUSED, by name. Caller must hold 'lock'.
 * NOTE(review): the local declaration and the return statements are on
 * lines missing from this extraction.
 */
440 static struct cell *find_cell(struct jailhouse_cell_id *cell_id)
444 list_for_each_entry(cell, &cells, entry)
445 if (cell_id->id == cell->id ||
446 (cell_id->id == JAILHOUSE_CELL_ID_UNUSED &&
447 strcmp(kobject_name(&cell->kobj), cell_id->name) == 0))
/*
 * Unpublish and drop a cell: remove it from the list and its stats group,
 * then release the final kobject reference (which frees the cell via
 * cell_kobj_release). Caller must hold 'lock'.
 */
452 static void delete_cell(struct cell *cell)
454 list_del(&cell->entry);
455 sysfs_remove_group(&cell->kobj, &stats_attr_group);
456 kobject_put(&cell->kobj);
/*
 * Determine the highest CPU number (+1) present in the user-supplied root
 * cell CPU set by scanning the bitmap from its top byte downwards.
 * Returns a negative errno on copy faults (error paths elided here).
 */
459 static long get_max_cpus(u32 cpu_set_size,
460 const struct jailhouse_system __user *system_config)
463 (u8 __user *)jailhouse_cell_cpu_set(&system_config->root_cell);
464 unsigned int pos = cpu_set_size;
469 if (get_user(bitmap, cpu_set + pos))
471 max_cpu_id = fls(bitmap);
/* pos * 8 bits per preceding byte plus the highest set bit found. */
473 return pos * 8 + max_cpu_id;
/*
 * Map 'size' bytes of physical memory; when 'virt' is non-zero the VM
 * area is pinned at that exact virtual address (required for the
 * hypervisor image at JAILHOUSE_BASE), otherwise any vmalloc-range
 * address is used. Returns the mapping or NULL (failure paths elided).
 */
478 static void *jailhouse_ioremap(phys_addr_t phys, unsigned long virt,
481 struct vm_struct *vma;
483 size = PAGE_ALIGN(size);
485 vma = __get_vm_area(size, VM_IOREMAP, virt,
486 virt + size + PAGE_SIZE);
488 vma = __get_vm_area(size, VM_IOREMAP, VMALLOC_START,
492 vma->phys_addr = phys;
494 if (ioremap_page_range((unsigned long)vma->addr,
495 (unsigned long)vma->addr + size, phys,
/*
 * Per-CPU activation routine run via on_each_cpu(): CPUs below the
 * hypervisor's max_cpus limit jump into the hypervisor entry point; each
 * CPU then bumps call_done so jailhouse_enable() can rendezvous.
 */
504 static void enter_hypervisor(void *info)
506 struct jailhouse_header *header = info;
507 unsigned int cpu = smp_processor_id();
510 if (cpu < header->max_cpus)
511 /* either returns 0 or the same error code across all CPUs */
512 err = header->entry(cpu);
519 #if defined(CONFIG_X86) && LINUX_VERSION_CODE >= KERNEL_VERSION(4,0,0)
520 /* on Intel, VMXE is now on - update the shadow */
524 atomic_inc(&call_done);
/*
 * Pick the firmware image name: on x86 the AMD (SVM) or Intel (VMX)
 * image depending on CPU features (NULL when neither is present, on an
 * elided line), the generic image on other architectures.
 */
527 static inline const char * jailhouse_fw_name(void)
530 if (boot_cpu_has(X86_FEATURE_SVM))
531 return JAILHOUSE_AMD_FW_NAME;
532 if (boot_cpu_has(X86_FEATURE_VMX))
533 return JAILHOUSE_INTEL_FW_NAME;
536 return JAILHOUSE_FW_NAME;
/*
 * JAILHOUSE_ENABLE ioctl backend: validate the user-provided system
 * config, load the hypervisor firmware, map and populate the reserved
 * hypervisor memory, create the root cell and switch all online CPUs
 * into the hypervisor. Uses goto-based unwind labels (partially elided
 * in this extraction) for the error paths.
 */
540 static int jailhouse_enable(struct jailhouse_system __user *arg)
542 const struct firmware *hypervisor;
543 struct jailhouse_system config_header;
544 struct jailhouse_system *config;
545 struct jailhouse_memory *hv_mem = &config_header.hypervisor_memory;
546 struct jailhouse_header *header;
547 void __iomem *uart = NULL;
548 unsigned long config_size;
/* Reject unsupported CPUs before doing any work. */
553 fw_name = jailhouse_fw_name();
555 pr_err("jailhouse: Missing or unsupported HVM technology\n");
559 if (copy_from_user(&config_header, arg, sizeof(config_header)))
/* Force NUL termination of the (untrusted) root cell name. */
561 config_header.root_cell.name[JAILHOUSE_CELL_NAME_MAXLEN] = 0;
563 max_cpus = get_max_cpus(config_header.root_cell.cpu_set_size, arg);
566 if (max_cpus > UINT_MAX)
569 if (mutex_lock_interruptible(&lock) != 0)
/* Only one enable at a time; pin the module while active. */
573 if (enabled || !try_module_get(THIS_MODULE))
576 err = request_firmware(&hypervisor, fw_name, jailhouse_dev);
578 pr_err("jailhouse: Missing hypervisor image %s\n", fw_name);
579 goto error_put_module;
582 header = (struct jailhouse_header *)hypervisor->data;
/* Sanity-check the image signature before trusting its header. */
585 if (memcmp(header->signature, JAILHOUSE_SIGNATURE,
586 sizeof(header->signature)) != 0)
587 goto error_release_fw;
589 hv_core_and_percpu_size = PAGE_ALIGN(header->core_size) +
590 max_cpus * header->percpu_size;
591 config_size = jailhouse_system_config_size(&config_header);
/* Reserved region must hold core + per-cpu data + the config blob. */
592 if (hv_mem->size <= hv_core_and_percpu_size + config_size)
593 goto error_release_fw;
/* Map the reserved RAM at the hypervisor's fixed link address. */
595 hypervisor_mem = jailhouse_ioremap(hv_mem->phys_start, JAILHOUSE_BASE,
597 if (!hypervisor_mem) {
598 pr_err("jailhouse: Unable to map RAM reserved for hypervisor "
599 "at %08lx\n", (unsigned long)hv_mem->phys_start);
600 goto error_release_fw;
/* Copy the image in and clear the remainder of the region. */
603 memcpy(hypervisor_mem, hypervisor->data, hypervisor->size);
604 memset(hypervisor_mem + hypervisor->size, 0,
605 hv_mem->size - hypervisor->size);
607 header = (struct jailhouse_header *)hypervisor_mem;
608 header->max_cpus = max_cpus;
/* The full system config lands right after core + per-cpu areas. */
610 config = (struct jailhouse_system *)
611 (hypervisor_mem + hv_core_and_percpu_size);
612 if (copy_from_user(config, arg, config_size)) {
/* Optionally map the hypervisor debug UART for early output. */
617 if (config->debug_uart.flags & JAILHOUSE_MEM_IO) {
618 uart = ioremap(config->debug_uart.phys_start,
619 config->debug_uart.size);
622 pr_err("jailhouse: Unable to map hypervisor UART at "
624 (unsigned long)config->debug_uart.phys_start);
627 header->debug_uart_base = (void *)uart;
630 root_cell = create_cell(&config->root_cell);
631 if (IS_ERR(root_cell)) {
632 err = PTR_ERR(root_cell);
/* Root cell only owns CPUs that are actually online. */
636 cpumask_and(&root_cell->cpus_assigned, &root_cell->cpus_assigned,
643 header->online_cpus = num_online_cpus();
/* Rendezvous: wait until every CPU has run enter_hypervisor(). */
645 atomic_set(&call_done, 0);
646 on_each_cpu(enter_hypervisor, header, 0);
647 while (atomic_read(&call_done) != num_online_cpus())
654 goto error_free_cell;
/* Make ivshmem devices of the root cell visible to Linux. */
657 jailhouse_pci_do_all_devices(root_cell, JAILHOUSE_PCI_TYPE_IVSHMEM,
658 JAILHOUSE_PCI_ACTION_ADD);
663 release_firmware(hypervisor);
667 register_cell(root_cell);
671 pr_info("The Jailhouse is opening.\n");
/* Error unwind (labels elided in this extraction). */
676 delete_cell(root_cell);
679 vunmap(hypervisor_mem);
684 release_firmware(hypervisor);
687 module_put(THIS_MODULE);
/*
 * Per-CPU deactivation routine run via on_each_cpu(): pre-fault all
 * hypervisor pages, issue the DISABLE hypercall, then signal completion
 * through call_done.
 */
694 static void leave_hypervisor(void *info)
700 /* Touch each hypervisor page we may need during the switch so that
701 * the active mm definitely contains all mappings. At least x86 does
702 * not support taking any faults while switching worlds. */
703 for (page = hypervisor_mem, size = hv_core_and_percpu_size; size > 0;
704 size -= PAGE_SIZE, page += PAGE_SIZE)
707 /* either returns 0 or the same error code across all CPUs */
708 err = jailhouse_call(JAILHOUSE_HC_DISABLE);
712 #if defined(CONFIG_X86) && LINUX_VERSION_CODE >= KERNEL_VERSION(4,0,0)
713 /* on Intel, VMXE is now off - update the shadow */
717 atomic_inc(&call_done);
/*
 * JAILHOUSE_DISABLE ioctl backend: switch every CPU back out of the
 * hypervisor, unmap the hypervisor memory, re-online CPUs that were
 * parked for non-root cells, remove root-cell ivshmem devices and delete
 * all cell objects.
 */
720 static int jailhouse_disable(void)
722 struct cell *cell, *tmp;
726 if (mutex_lock_interruptible(&lock) != 0)
/* Rendezvous with leave_hypervisor() on all online CPUs. */
738 atomic_set(&call_done, 0);
739 on_each_cpu(leave_hypervisor, NULL, 0);
740 while (atomic_read(&call_done) != num_online_cpus())
749 vunmap(hypervisor_mem);
/* Bring back CPUs that were offlined when cells were created. */
751 for_each_cpu(cpu, &offlined_cpus) {
752 if (cpu_up(cpu) != 0)
753 pr_err("Jailhouse: failed to bring CPU %d back "
755 cpu_clear(cpu, offlined_cpus);
758 jailhouse_pci_do_all_devices(root_cell, JAILHOUSE_PCI_TYPE_IVSHMEM,
759 JAILHOUSE_PCI_ACTION_DEL);
/* Safe iteration: delete_cell() unlinks entries as we go. */
761 list_for_each_entry_safe(cell, tmp, &cells, entry)
764 module_put(THIS_MODULE);
766 pr_info("The Jailhouse was closed.\n");
/*
 * JAILHOUSE_CELL_CREATE ioctl backend: copy in the cell config, reject
 * duplicate names, take the requested CPUs offline (recording them in
 * offlined_cpus and removing them from the root cell), then issue the
 * CELL_CREATE hypercall. On hypercall failure the CPU changes are rolled
 * back (error_cpu_online path).
 */
774 static int jailhouse_cell_create(struct jailhouse_cell_create __user *arg)
776 struct jailhouse_cell_create cell_params;
777 struct jailhouse_cell_desc *config;
778 struct jailhouse_cell_id cell_id;
783 if (copy_from_user(&cell_params, arg, sizeof(cell_params)))
/* GFP_DMA: the hypervisor accesses the config via its physical address. */
786 config = kmalloc(cell_params.config_size, GFP_KERNEL | GFP_DMA);
790 if (copy_from_user(config,
791 (void *)(unsigned long)cell_params.config_address,
792 cell_params.config_size)) {
794 goto kfree_config_out;
/* Force NUL termination of the untrusted cell name. */
796 config->name[JAILHOUSE_CELL_NAME_MAXLEN] = 0;
798 if (mutex_lock_interruptible(&lock) != 0) {
800 goto kfree_config_out;
/* Reject a second cell with the same name. */
808 cell_id.id = JAILHOUSE_CELL_ID_UNUSED;
809 memcpy(cell_id.name, config->name, sizeof(cell_id.name));
810 if (find_cell(&cell_id) != NULL) {
815 cell = create_cell(config);
/* New cell may only use CPUs currently owned by the root cell. */
821 if (!cpumask_subset(&cell->cpus_assigned, &root_cell->cpus_assigned)) {
823 goto error_cell_delete;
/* Park the cell's CPUs and transfer them away from the root cell. */
826 for_each_cpu(cpu, &cell->cpus_assigned) {
827 if (cpu_online(cpu)) {
830 goto error_cpu_online;
831 cpu_set(cpu, offlined_cpus);
833 cpu_clear(cpu, root_cell->cpus_assigned);
836 id = jailhouse_call_arg1(JAILHOUSE_HC_CELL_CREATE, __pa(config));
839 goto error_cpu_online;
845 pr_info("Created Jailhouse cell \"%s\"\n", config->name);
/* Rollback: re-online parked CPUs and give them back to the root cell. */
856 for_each_cpu(cpu, &cell->cpus_assigned) {
857 if (!cpu_online(cpu) && cpu_up(cpu) == 0)
858 cpu_clear(cpu, offlined_cpus);
859 cpu_set(cpu, root_cell->cpus_assigned);
/*
 * Common entry for load/start/destroy: NUL-terminate the untrusted cell
 * name, take 'lock' and resolve the cell id to a struct cell. On success
 * the lock is held and *cell_ptr is valid; error paths are elided here.
 */
867 static int cell_management_prologue(struct jailhouse_cell_id *cell_id,
868 struct cell **cell_ptr)
870 cell_id->name[JAILHOUSE_CELL_ID_NAMELEN] = 0;
872 if (mutex_lock_interruptible(&lock) != 0)
880 *cell_ptr = find_cell(cell_id);
881 if (*cell_ptr == NULL) {
/* A cell image may only be loaded into regions that are both writable
 * and marked loadable in the cell config. */
888 #define MEM_REQ_FLAGS (JAILHOUSE_MEM_WRITE | JAILHOUSE_MEM_LOADABLE)
/*
 * Copy one user-supplied preload image into cell RAM: find the memory
 * region containing the target virtual address, validate size and flags,
 * temporarily map the destination physical range and copy the image in.
 */
890 static int load_image(struct cell *cell,
891 struct jailhouse_preload_image __user *uimage)
893 struct jailhouse_preload_image image;
894 const struct jailhouse_memory *mem;
895 unsigned int regions;
900 if (copy_from_user(&image, uimage, sizeof(image)))
903 mem = cell->memory_regions;
904 for (regions = cell->num_memory_regions; regions > 0; regions--) {
/* Unsigned wrap is benign: the range check below rejects misses. */
905 image_offset = image.target_address - mem->virt_start;
906 if (image.target_address >= mem->virt_start &&
907 image_offset < mem->size) {
908 if (image.size > mem->size - image_offset ||
909 (mem->flags & MEM_REQ_FLAGS) != MEM_REQ_FLAGS)
/* virt=0: let jailhouse_ioremap pick any vmalloc address. */
918 image_mem = jailhouse_ioremap(mem->phys_start + image_offset, 0,
921 pr_err("jailhouse: Unable to map cell RAM at %08llx "
922 "for image loading\n",
923 (unsigned long long)(mem->phys_start + image_offset));
927 if (copy_from_user(image_mem,
928 (void *)(unsigned long)image.source_address,
/*
 * JAILHOUSE_CELL_LOAD ioctl backend: put the cell into the loadable
 * state via hypercall, then copy each preload image into cell memory.
 * Unlock/return paths are elided in this extraction.
 */
937 static int jailhouse_cell_load(struct jailhouse_cell_load __user *arg)
939 struct jailhouse_preload_image __user *image = arg->image;
940 struct jailhouse_cell_load cell_load;
945 if (copy_from_user(&cell_load, arg, sizeof(cell_load)))
948 err = cell_management_prologue(&cell_load.cell_id, &cell);
952 err = jailhouse_call_arg1(JAILHOUSE_HC_CELL_SET_LOADABLE, cell->id);
956 for (n = cell_load.num_preload_images; n > 0; n--, image++) {
957 err = load_image(cell, image);
/*
 * JAILHOUSE_CELL_START ioctl backend: resolve the cell id and issue the
 * CELL_START hypercall (unlock path elided).
 */
968 static int jailhouse_cell_start(const char __user *arg)
970 struct jailhouse_cell_id cell_id;
974 if (copy_from_user(&cell_id, arg, sizeof(cell_id)))
977 err = cell_management_prologue(&cell_id, &cell);
981 err = jailhouse_call_arg1(JAILHOUSE_HC_CELL_START, cell->id);
/*
 * JAILHOUSE_CELL_DESTROY ioctl backend: issue the CELL_DESTROY hypercall,
 * then return the cell's CPUs to Linux (re-onlining those that were
 * parked) and to the root cell's CPU mask, and delete the cell object.
 */
988 static int jailhouse_cell_destroy(const char __user *arg)
990 struct jailhouse_cell_id cell_id;
995 if (copy_from_user(&cell_id, arg, sizeof(cell_id)))
998 err = cell_management_prologue(&cell_id, &cell);
1002 err = jailhouse_call_arg1(JAILHOUSE_HC_CELL_DESTROY, cell->id);
1006 for_each_cpu(cpu, &cell->cpus_assigned) {
1007 if (cpu_isset(cpu, offlined_cpus)) {
1008 if (cpu_up(cpu) != 0)
1009 pr_err("Jailhouse: failed to bring CPU %d "
1010 "back online\n", cpu);
1011 cpu_clear(cpu, offlined_cpus);
1013 cpu_set(cpu, root_cell->cpus_assigned);
1016 pr_info("Destroyed Jailhouse cell \"%s\"\n",
1017 kobject_name(&cell->kobj));
1022 mutex_unlock(&lock);
/*
 * ioctl dispatcher for /dev/jailhouse: routes each command to its
 * backend. Also used as compat_ioctl since the ABI is layout-compatible.
 * NOTE(review): the switch braces, default case and return are on lines
 * missing from this extraction.
 */
1027 static long jailhouse_ioctl(struct file *file, unsigned int ioctl,
1033 case JAILHOUSE_ENABLE:
1034 err = jailhouse_enable(
1035 (struct jailhouse_system __user *)arg);
1037 case JAILHOUSE_DISABLE:
1038 err = jailhouse_disable();
1040 case JAILHOUSE_CELL_CREATE:
1041 err = jailhouse_cell_create(
1042 (struct jailhouse_cell_create __user *)arg);
1044 case JAILHOUSE_CELL_LOAD:
1045 err = jailhouse_cell_load(
1046 (struct jailhouse_cell_load __user *)arg);
1048 case JAILHOUSE_CELL_START:
1049 err = jailhouse_cell_start((const char __user *)arg);
1051 case JAILHOUSE_CELL_DESTROY:
1052 err = jailhouse_cell_destroy((const char __user *)arg);
/* File operations for the misc device; seeking is meaningless here. */
1062 static const struct file_operations jailhouse_fops = {
1063 .owner = THIS_MODULE,
1064 .unlocked_ioctl = jailhouse_ioctl,
1065 .compat_ioctl = jailhouse_ioctl,
1066 .llseek = noop_llseek,
/* Registers /dev/jailhouse with a dynamically assigned minor. */
1069 static struct miscdevice jailhouse_misc_dev = {
1070 .minor = MISC_DYNAMIC_MINOR,
1071 .name = "jailhouse",
1072 .fops = &jailhouse_fops,
/*
 * Reboot notifier: disable the hypervisor on system shutdown. -EINVAL
 * (hypervisor not enabled) is not an error worth reporting.
 */
1075 static int jailhouse_shutdown_notify(struct notifier_block *unused1,
1076 unsigned long unused2, void *unused3)
1080 err = jailhouse_disable();
1081 if (err && err != -EINVAL)
1082 pr_emerg("jailhouse: ordered shutdown failed!\n");
1087 static struct notifier_block jailhouse_shutdown_nb = {
1088 .notifier_call = jailhouse_shutdown_notify,
/* sysfs: report whether the hypervisor is currently enabled (0/1). */
1091 static ssize_t enabled_show(struct device *dev, struct device_attribute *attr,
1094 return sprintf(buffer, "%d\n", enabled);
/*
 * Common backend for the pool-size/-usage attributes: query one
 * HYPERVISOR_GET_INFO value under the lock and format it. Error handling
 * for negative hypercall returns is on elided lines.
 */
1097 static ssize_t info_show(struct device *dev, char *buffer, unsigned int type)
1102 if (mutex_lock_interruptible(&lock) != 0)
1106 val = jailhouse_call_arg1(JAILHOUSE_HC_HYPERVISOR_GET_INFO,
1109 result = sprintf(buffer, "%ld\n", val);
1113 mutex_unlock(&lock);
/* Thin wrappers mapping each attribute to its info type code. */
1117 static ssize_t mem_pool_size_show(struct device *dev,
1118 struct device_attribute *attr, char *buffer)
1120 return info_show(dev, buffer, JAILHOUSE_INFO_MEM_POOL_SIZE);
1123 static ssize_t mem_pool_used_show(struct device *dev,
1124 struct device_attribute *attr, char *buffer)
1126 return info_show(dev, buffer, JAILHOUSE_INFO_MEM_POOL_USED);
1129 static ssize_t remap_pool_size_show(struct device *dev,
1130 struct device_attribute *attr,
1133 return info_show(dev, buffer, JAILHOUSE_INFO_REMAP_POOL_SIZE);
1136 static ssize_t remap_pool_used_show(struct device *dev,
1137 struct device_attribute *attr,
1140 return info_show(dev, buffer, JAILHOUSE_INFO_REMAP_POOL_USED);
/* Read-only device attributes exposed under /sys/devices/jailhouse. */
1143 static DEVICE_ATTR_RO(enabled);
1144 static DEVICE_ATTR_RO(mem_pool_size);
1145 static DEVICE_ATTR_RO(mem_pool_used);
1146 static DEVICE_ATTR_RO(remap_pool_size);
1147 static DEVICE_ATTR_RO(remap_pool_used);
1149 static struct attribute *jailhouse_sysfs_entries[] = {
1150 &dev_attr_enabled.attr,
1151 &dev_attr_mem_pool_size.attr,
1152 &dev_attr_mem_pool_used.attr,
1153 &dev_attr_remap_pool_size.attr,
1154 &dev_attr_remap_pool_used.attr,
1158 static struct attribute_group jailhouse_attribute_group = {
1160 .attrs = jailhouse_sysfs_entries,
/*
 * Module init: register the root device, its sysfs attribute group, the
 * "cells" kobject directory, the misc device and the reboot notifier,
 * unwinding in reverse order on failure (labels partly elided here).
 */
1163 static int __init jailhouse_init(void)
1167 jailhouse_dev = root_device_register("jailhouse");
1168 if (IS_ERR(jailhouse_dev))
1169 return PTR_ERR(jailhouse_dev);
1171 err = sysfs_create_group(&jailhouse_dev->kobj,
1172 &jailhouse_attribute_group);
1176 cells_dir = kobject_create_and_add("cells", &jailhouse_dev->kobj);
1182 err = misc_register(&jailhouse_misc_dev);
1184 goto remove_cells_dir;
1186 register_reboot_notifier(&jailhouse_shutdown_nb);
/* Error unwind. */
1193 kobject_put(cells_dir);
1196 sysfs_remove_group(&jailhouse_dev->kobj, &jailhouse_attribute_group);
1199 root_device_unregister(jailhouse_dev);
/*
 * Module exit: tear down everything init registered, in reverse order.
 */
1203 static void __exit jailhouse_exit(void)
1205 unregister_reboot_notifier(&jailhouse_shutdown_nb);
1206 misc_deregister(&jailhouse_misc_dev);
1207 kobject_put(cells_dir);
1208 sysfs_remove_group(&jailhouse_dev->kobj, &jailhouse_attribute_group);
1209 root_device_unregister(jailhouse_dev);
1212 module_init(jailhouse_init);
1213 module_exit(jailhouse_exit);