/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013-2015
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
13 #include <linux/cpu.h>
14 #include <linux/slab.h>
15 #include <linux/vmalloc.h>
22 #include <jailhouse/hypercall.h>
24 static LIST_HEAD(cells);
25 static struct cell *root_cell;
26 static cpumask_t offlined_cpus;
28 void jailhouse_cell_kobj_release(struct kobject *kobj)
30 struct cell *cell = container_of(kobj, struct cell, kobj);
32 jailhouse_pci_cell_cleanup(cell);
33 vfree(cell->memory_regions);
37 struct cell *jailhouse_cell_create(const struct jailhouse_cell_desc *cell_desc)
42 if (cell_desc->num_memory_regions >=
43 ULONG_MAX / sizeof(struct jailhouse_memory))
44 return ERR_PTR(-EINVAL);
46 cell = kzalloc(sizeof(*cell), GFP_KERNEL);
48 return ERR_PTR(-ENOMEM);
50 INIT_LIST_HEAD(&cell->entry);
52 bitmap_copy(cpumask_bits(&cell->cpus_assigned),
53 jailhouse_cell_cpu_set(cell_desc),
54 min(nr_cpumask_bits, (int)cell_desc->cpu_set_size * 8));
56 cell->num_memory_regions = cell_desc->num_memory_regions;
57 cell->memory_regions = vmalloc(sizeof(struct jailhouse_memory) *
58 cell->num_memory_regions);
59 if (!cell->memory_regions) {
61 return ERR_PTR(-ENOMEM);
64 memcpy(cell->memory_regions, jailhouse_cell_mem_regions(cell_desc),
65 sizeof(struct jailhouse_memory) * cell->num_memory_regions);
67 err = jailhouse_pci_cell_setup(cell, cell_desc);
69 vfree(cell->memory_regions);
74 err = jailhouse_sysfs_cell_create(cell, cell_desc->name);
76 /* cleanup done by jailhouse_sysfs_cell_create */
82 void jailhouse_cell_register(struct cell *cell)
84 list_add_tail(&cell->entry, &cells);
85 jailhouse_sysfs_cell_register(cell);
88 static struct cell *find_cell(struct jailhouse_cell_id *cell_id)
92 list_for_each_entry(cell, &cells, entry)
93 if (cell_id->id == cell->id ||
94 (cell_id->id == JAILHOUSE_CELL_ID_UNUSED &&
95 strcmp(kobject_name(&cell->kobj), cell_id->name) == 0))
100 void jailhouse_cell_delete(struct cell *cell)
102 list_del(&cell->entry);
103 jailhouse_sysfs_cell_delete(cell);
106 int jailhouse_cell_prepare_root(const struct jailhouse_cell_desc *cell_desc)
108 root_cell = jailhouse_cell_create(cell_desc);
109 if (IS_ERR(root_cell))
110 return PTR_ERR(root_cell);
112 cpumask_and(&root_cell->cpus_assigned, &root_cell->cpus_assigned,
118 void jailhouse_cell_register_root(void)
120 jailhouse_pci_do_all_devices(root_cell, JAILHOUSE_PCI_TYPE_IVSHMEM,
121 JAILHOUSE_PCI_ACTION_ADD);
124 jailhouse_cell_register(root_cell);
127 void jailhouse_cell_delete_root(void)
129 jailhouse_cell_delete(root_cell);
132 void jailhouse_cell_delete_all(void)
134 struct cell *cell, *tmp;
137 jailhouse_pci_do_all_devices(root_cell, JAILHOUSE_PCI_TYPE_IVSHMEM,
138 JAILHOUSE_PCI_ACTION_DEL);
140 list_for_each_entry_safe(cell, tmp, &cells, entry)
141 jailhouse_cell_delete(cell);
143 for_each_cpu(cpu, &offlined_cpus) {
144 if (cpu_up(cpu) != 0)
145 pr_err("Jailhouse: failed to bring CPU %d back "
147 cpu_clear(cpu, offlined_cpus);
151 int jailhouse_cmd_cell_create(struct jailhouse_cell_create __user *arg)
153 struct jailhouse_cell_create cell_params;
154 struct jailhouse_cell_desc *config;
155 struct jailhouse_cell_id cell_id;
156 void __user *user_config;
161 if (copy_from_user(&cell_params, arg, sizeof(cell_params)))
164 config = kmalloc(cell_params.config_size, GFP_KERNEL | GFP_DMA);
168 user_config = (void __user *)(unsigned long)cell_params.config_address;
169 if (copy_from_user(config, user_config, cell_params.config_size)) {
171 goto kfree_config_out;
173 config->name[JAILHOUSE_CELL_NAME_MAXLEN] = 0;
175 if (mutex_lock_interruptible(&jailhouse_lock) != 0) {
177 goto kfree_config_out;
180 if (!jailhouse_enabled) {
185 cell_id.id = JAILHOUSE_CELL_ID_UNUSED;
186 memcpy(cell_id.name, config->name, sizeof(cell_id.name));
187 if (find_cell(&cell_id) != NULL) {
192 cell = jailhouse_cell_create(config);
198 if (!cpumask_subset(&cell->cpus_assigned, &root_cell->cpus_assigned)) {
200 goto error_cell_delete;
203 for_each_cpu(cpu, &cell->cpus_assigned) {
204 if (cpu_online(cpu)) {
207 goto error_cpu_online;
208 cpu_set(cpu, offlined_cpus);
210 cpu_clear(cpu, root_cell->cpus_assigned);
213 id = jailhouse_call_arg1(JAILHOUSE_HC_CELL_CREATE, __pa(config));
216 goto error_cpu_online;
220 jailhouse_cell_register(cell);
222 pr_info("Created Jailhouse cell \"%s\"\n", config->name);
225 mutex_unlock(&jailhouse_lock);
233 for_each_cpu(cpu, &cell->cpus_assigned) {
234 if (!cpu_online(cpu) && cpu_up(cpu) == 0)
235 cpu_clear(cpu, offlined_cpus);
236 cpu_set(cpu, root_cell->cpus_assigned);
240 jailhouse_cell_delete(cell);
244 static int cell_management_prologue(struct jailhouse_cell_id *cell_id,
245 struct cell **cell_ptr)
247 cell_id->name[JAILHOUSE_CELL_ID_NAMELEN] = 0;
249 if (mutex_lock_interruptible(&jailhouse_lock) != 0)
252 if (!jailhouse_enabled) {
253 mutex_unlock(&jailhouse_lock);
257 *cell_ptr = find_cell(cell_id);
258 if (*cell_ptr == NULL) {
259 mutex_unlock(&jailhouse_lock);
265 #define MEM_REQ_FLAGS (JAILHOUSE_MEM_WRITE | JAILHOUSE_MEM_LOADABLE)
267 static int load_image(struct cell *cell,
268 struct jailhouse_preload_image __user *uimage)
270 struct jailhouse_preload_image image;
271 const struct jailhouse_memory *mem;
272 unsigned int regions;
277 if (copy_from_user(&image, uimage, sizeof(image)))
280 mem = cell->memory_regions;
281 for (regions = cell->num_memory_regions; regions > 0; regions--) {
282 image_offset = image.target_address - mem->virt_start;
283 if (image.target_address >= mem->virt_start &&
284 image_offset < mem->size) {
285 if (image.size > mem->size - image_offset ||
286 (mem->flags & MEM_REQ_FLAGS) != MEM_REQ_FLAGS)
295 image_mem = jailhouse_ioremap(mem->phys_start + image_offset, 0,
298 pr_err("jailhouse: Unable to map cell RAM at %08llx "
299 "for image loading\n",
300 (unsigned long long)(mem->phys_start + image_offset));
304 if (copy_from_user(image_mem,
305 (void __user *)(unsigned long)image.source_address,
314 int jailhouse_cmd_cell_load(struct jailhouse_cell_load __user *arg)
316 struct jailhouse_preload_image __user *image = arg->image;
317 struct jailhouse_cell_load cell_load;
322 if (copy_from_user(&cell_load, arg, sizeof(cell_load)))
325 err = cell_management_prologue(&cell_load.cell_id, &cell);
329 err = jailhouse_call_arg1(JAILHOUSE_HC_CELL_SET_LOADABLE, cell->id);
333 for (n = cell_load.num_preload_images; n > 0; n--, image++) {
334 err = load_image(cell, image);
340 mutex_unlock(&jailhouse_lock);
345 int jailhouse_cmd_cell_start(const char __user *arg)
347 struct jailhouse_cell_id cell_id;
351 if (copy_from_user(&cell_id, arg, sizeof(cell_id)))
354 err = cell_management_prologue(&cell_id, &cell);
358 err = jailhouse_call_arg1(JAILHOUSE_HC_CELL_START, cell->id);
360 mutex_unlock(&jailhouse_lock);
365 int jailhouse_cmd_cell_destroy(const char __user *arg)
367 struct jailhouse_cell_id cell_id;
372 if (copy_from_user(&cell_id, arg, sizeof(cell_id)))
375 err = cell_management_prologue(&cell_id, &cell);
379 err = jailhouse_call_arg1(JAILHOUSE_HC_CELL_DESTROY, cell->id);
383 for_each_cpu(cpu, &cell->cpus_assigned) {
384 if (cpu_isset(cpu, offlined_cpus)) {
385 if (cpu_up(cpu) != 0)
386 pr_err("Jailhouse: failed to bring CPU %d "
387 "back online\n", cpu);
388 cpu_clear(cpu, offlined_cpus);
390 cpu_set(cpu, root_cell->cpus_assigned);
393 pr_info("Destroyed Jailhouse cell \"%s\"\n",
394 kobject_name(&cell->kobj));
396 jailhouse_cell_delete(cell);
399 mutex_unlock(&jailhouse_lock);