/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013-2015
 * Copyright (c) Valentine Sinitsyn, 2014
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *  Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/firmware.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "cell.h"
#include "jailhouse.h"
#include "main.h"
#include "pci.h"
#include "sysfs.h"

#include <jailhouse/header.h>
#include <jailhouse/hypercall.h>
#include <generated/version.h>

#ifdef CONFIG_X86_32
#error 64-bit kernel required!
#endif

#if JAILHOUSE_CELL_ID_NAMELEN != JAILHOUSE_CELL_NAME_MAXLEN
# warning JAILHOUSE_CELL_ID_NAMELEN and JAILHOUSE_CELL_NAME_MAXLEN out of sync!
#endif

/* For compatibility with older kernel versions */
#include <linux/version.h>

#ifdef CONFIG_X86
#define JAILHOUSE_AMD_FW_NAME   "jailhouse-amd.bin"
#define JAILHOUSE_INTEL_FW_NAME "jailhouse-intel.bin"
#else
#define JAILHOUSE_FW_NAME       "jailhouse.bin"
#endif

MODULE_DESCRIPTION("Management driver for Jailhouse partitioning hypervisor");
MODULE_LICENSE("GPL");
#ifdef CONFIG_X86
MODULE_FIRMWARE(JAILHOUSE_AMD_FW_NAME);
MODULE_FIRMWARE(JAILHOUSE_INTEL_FW_NAME);
#else
MODULE_FIRMWARE(JAILHOUSE_FW_NAME);
#endif
MODULE_VERSION(JAILHOUSE_VERSION);

DEFINE_MUTEX(jailhouse_lock);
bool jailhouse_enabled;

static struct device *jailhouse_dev;
static void *hypervisor_mem;
static unsigned long hv_core_and_percpu_size;
static atomic_t call_done;
static int error_code;

#ifdef CONFIG_X86
bool jailhouse_use_vmcall;

static void init_hypercall(void)
{
        jailhouse_use_vmcall = boot_cpu_has(X86_FEATURE_VMX);
}
#else /* !CONFIG_X86 */
static void init_hypercall(void)
{
}
#endif

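/*
 * Derive the number of CPUs the hypervisor must support from the root cell's
 * CPU set: scan the bitmap from its last byte and return the position of the
 * highest set bit plus one. Returns -EFAULT if the user-space buffer cannot
 * be read, -EINVAL if the CPU set is empty.
 */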
static long get_max_cpus(u32 cpu_set_size,
                         const struct jailhouse_system __user *system_config)
{
        u8 __user *cpu_set =
                (u8 __user *)jailhouse_cell_cpu_set(
                                (const struct jailhouse_cell_desc * __force)
                                &system_config->root_cell);
        unsigned int pos = cpu_set_size;
        long max_cpu_id;
        u8 bitmap;

        while (pos-- > 0) {
                if (get_user(bitmap, cpu_set + pos))
                        return -EFAULT;
                max_cpu_id = fls(bitmap);
                if (max_cpu_id > 0)
                        return pos * 8 + max_cpu_id;
        }
        return -EINVAL;
}

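/*
 * Map the physical range starting at phys into kernel virtual address space.
 * If virt is non-zero, the mapping is placed at exactly this address (the
 * hypervisor is linked to run at JAILHOUSE_BASE); otherwise an arbitrary
 * free range of the vmalloc area is used. Returns the virtual address on
 * success, NULL on failure.
 */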
void *jailhouse_ioremap(phys_addr_t phys, unsigned long virt,
                        unsigned long size)
{
        struct vm_struct *vma;

        size = PAGE_ALIGN(size);
        if (virt)
                vma = __get_vm_area(size, VM_IOREMAP, virt,
                                    virt + size + PAGE_SIZE);
        else
                vma = __get_vm_area(size, VM_IOREMAP, VMALLOC_START,
                                    VMALLOC_END);
        if (!vma)
                return NULL;
        vma->phys_addr = phys;

        if (ioremap_page_range((unsigned long)vma->addr,
                               (unsigned long)vma->addr + size, phys,
                               PAGE_KERNEL_EXEC)) {
                vunmap(vma->addr);
                return NULL;
        }

        return vma->addr;
}

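/*
 * Per-CPU entry into the hypervisor, invoked via on_each_cpu(). CPUs beyond
 * the configured maximum are rejected with -EINVAL. The caller spins on
 * call_done until every online CPU has passed through here.
 */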
static void enter_hypervisor(void *info)
{
        struct jailhouse_header *header = info;
        unsigned int cpu = smp_processor_id();
        int err;

        if (cpu < header->max_cpus)
                /* either returns 0 or the same error code across all CPUs */
                err = header->entry(cpu);
        else
                err = -EINVAL;

        if (err)
                error_code = err;

#if defined(CONFIG_X86) && LINUX_VERSION_CODE >= KERNEL_VERSION(4,0,0)
        /* on Intel, VMXE is now on - update the shadow */
        cr4_init_shadow();
#endif

        atomic_inc(&call_done);
}

static inline const char *jailhouse_fw_name(void)
{
#ifdef CONFIG_X86
        if (boot_cpu_has(X86_FEATURE_SVM))
                return JAILHOUSE_AMD_FW_NAME;
        if (boot_cpu_has(X86_FEATURE_VMX))
                return JAILHOUSE_INTEL_FW_NAME;
        return NULL;
#else
        return JAILHOUSE_FW_NAME;
#endif
}

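/*
 * JAILHOUSE_ENABLE: validate the system configuration, load the hypervisor
 * firmware image, copy image and configuration into the reserved hypervisor
 * memory region and switch all online CPUs into hypervisor mode.
 */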
static int jailhouse_cmd_enable(struct jailhouse_system __user *arg)
{
        const struct firmware *hypervisor;
        struct jailhouse_system config_header;
        struct jailhouse_system *config;
        struct jailhouse_memory *hv_mem = &config_header.hypervisor_memory;
        struct jailhouse_header *header;
        void __iomem *uart = NULL;
        unsigned long config_size;
        const char *fw_name;
        long max_cpus;
        int err;

        fw_name = jailhouse_fw_name();
        if (!fw_name) {
                pr_err("jailhouse: Missing or unsupported HVM technology\n");
                return -ENODEV;
        }

        if (copy_from_user(&config_header, arg, sizeof(config_header)))
                return -EFAULT;
        config_header.root_cell.name[JAILHOUSE_CELL_NAME_MAXLEN] = 0;

        max_cpus = get_max_cpus(config_header.root_cell.cpu_set_size, arg);
        if (max_cpus < 0)
                return max_cpus;
        if (max_cpus > UINT_MAX)
                return -EINVAL;

        if (mutex_lock_interruptible(&jailhouse_lock) != 0)
                return -EINTR;

        err = -EBUSY;
        if (jailhouse_enabled || !try_module_get(THIS_MODULE))
                goto error_unlock;

        err = request_firmware(&hypervisor, fw_name, jailhouse_dev);
        if (err) {
                pr_err("jailhouse: Missing hypervisor image %s\n", fw_name);
                goto error_put_module;
        }

        header = (struct jailhouse_header *)hypervisor->data;

        err = -EINVAL;
        if (memcmp(header->signature, JAILHOUSE_SIGNATURE,
                   sizeof(header->signature)) != 0 ||
            hypervisor->size >= hv_mem->size)
                goto error_release_fw;

        hv_core_and_percpu_size = PAGE_ALIGN(header->core_size) +
                max_cpus * header->percpu_size;
        config_size = jailhouse_system_config_size(&config_header);
        if (hv_core_and_percpu_size >= hv_mem->size ||
            config_size >= hv_mem->size - hv_core_and_percpu_size)
                goto error_release_fw;

        hypervisor_mem = jailhouse_ioremap(hv_mem->phys_start, JAILHOUSE_BASE,
                                           hv_mem->size);
        if (!hypervisor_mem) {
                pr_err("jailhouse: Unable to map RAM reserved for hypervisor "
                       "at %08lx\n", (unsigned long)hv_mem->phys_start);
                goto error_release_fw;
        }

        memcpy(hypervisor_mem, hypervisor->data, hypervisor->size);
        memset(hypervisor_mem + hypervisor->size, 0,
               hv_mem->size - hypervisor->size);

        header = (struct jailhouse_header *)hypervisor_mem;
        header->max_cpus = max_cpus;

        config = (struct jailhouse_system *)
                (hypervisor_mem + hv_core_and_percpu_size);
        if (copy_from_user(config, arg, config_size)) {
                err = -EFAULT;
                goto error_unmap;
        }

        if (config->debug_uart.flags & JAILHOUSE_MEM_IO) {
                uart = ioremap(config->debug_uart.phys_start,
                               config->debug_uart.size);
                if (!uart) {
                        err = -EINVAL;
                        pr_err("jailhouse: Unable to map hypervisor UART at "
                               "%08lx\n",
                               (unsigned long)config->debug_uart.phys_start);
                        goto error_unmap;
                }
                /* The hypervisor has no notion of address spaces, so we need
                 * to enforce conversion. */
                header->debug_uart_base = (void * __force)uart;
        }

        err = jailhouse_cell_prepare_root(&config->root_cell);
        if (err)
                goto error_unmap;

        error_code = 0;

        preempt_disable();

        header->online_cpus = num_online_cpus();

        atomic_set(&call_done, 0);
        on_each_cpu(enter_hypervisor, header, 0);
        while (atomic_read(&call_done) != num_online_cpus())
                cpu_relax();

        preempt_enable();

        if (error_code) {
                err = error_code;
                goto error_free_cell;
        }

        if (uart)
                iounmap(uart);

        release_firmware(hypervisor);

        jailhouse_cell_register_root();

        jailhouse_enabled = true;

        mutex_unlock(&jailhouse_lock);

        pr_info("The Jailhouse is opening.\n");

        return 0;

error_free_cell:
        jailhouse_cell_delete_root();

error_unmap:
        vunmap(hypervisor_mem);
        if (uart)
                iounmap(uart);

error_release_fw:
        release_firmware(hypervisor);

error_put_module:
        module_put(THIS_MODULE);

error_unlock:
        mutex_unlock(&jailhouse_lock);
        return err;
}

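/*
 * Per-CPU counterpart to enter_hypervisor(), invoked via on_each_cpu():
 * issues the disable hypercall that hands the CPU back to native Linux mode.
 */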
static void leave_hypervisor(void *info)
{
        unsigned long size;
        void *page;
        int err;

        /* Touch each hypervisor page we may need during the switch so that
         * the active mm definitely contains all mappings. At least x86 does
         * not support taking any faults while switching worlds. */
        for (page = hypervisor_mem, size = hv_core_and_percpu_size; size > 0;
             size -= PAGE_SIZE, page += PAGE_SIZE)
                readl((void __iomem *)page);

        /* either returns 0 or the same error code across all CPUs */
        err = jailhouse_call(JAILHOUSE_HC_DISABLE);
        if (err)
                error_code = err;

#if defined(CONFIG_X86) && LINUX_VERSION_CODE >= KERNEL_VERSION(4,0,0)
        /* on Intel, VMXE is now off - update the shadow */
        cr4_init_shadow();
#endif

        atomic_inc(&call_done);
}

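/*
 * JAILHOUSE_DISABLE: shut down the hypervisor on all online CPUs and release
 * the resources acquired during enable. Fails with -EINVAL while the
 * hypervisor is not enabled.
 */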
static int jailhouse_cmd_disable(void)
{
        int err;

        if (mutex_lock_interruptible(&jailhouse_lock) != 0)
                return -EINTR;

        if (!jailhouse_enabled) {
                err = -EINVAL;
                goto unlock_out;
        }

        error_code = 0;

        preempt_disable();

        atomic_set(&call_done, 0);
        on_each_cpu(leave_hypervisor, NULL, 0);
        while (atomic_read(&call_done) != num_online_cpus())
                cpu_relax();

        preempt_enable();

        err = error_code;
        if (err)
                goto unlock_out;

        vunmap(hypervisor_mem);

        jailhouse_cell_delete_all();
        jailhouse_enabled = false;
        module_put(THIS_MODULE);

        pr_info("The Jailhouse was closed.\n");

unlock_out:
        mutex_unlock(&jailhouse_lock);

        return err;
}

static long jailhouse_ioctl(struct file *file, unsigned int ioctl,
                            unsigned long arg)
{
        long err;

        switch (ioctl) {
        case JAILHOUSE_ENABLE:
                err = jailhouse_cmd_enable(
                        (struct jailhouse_system __user *)arg);
                break;
        case JAILHOUSE_DISABLE:
                err = jailhouse_cmd_disable();
                break;
        case JAILHOUSE_CELL_CREATE:
                err = jailhouse_cmd_cell_create(
                        (struct jailhouse_cell_create __user *)arg);
                break;
        case JAILHOUSE_CELL_LOAD:
                err = jailhouse_cmd_cell_load(
                        (struct jailhouse_cell_load __user *)arg);
                break;
        case JAILHOUSE_CELL_START:
                err = jailhouse_cmd_cell_start((const char __user *)arg);
                break;
        case JAILHOUSE_CELL_DESTROY:
                err = jailhouse_cmd_cell_destroy((const char __user *)arg);
                break;
        default:
                err = -EINVAL;
                break;
        }

        return err;
}

static const struct file_operations jailhouse_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = jailhouse_ioctl,
        .compat_ioctl = jailhouse_ioctl,
        .llseek = noop_llseek,
};

static struct miscdevice jailhouse_misc_dev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "jailhouse",
        .fops = &jailhouse_fops,
};

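/*
 * Reboot notifier: disable the hypervisor before the machine goes down.
 * -EINVAL only means Jailhouse was not enabled, so just report other errors.
 */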
static int jailhouse_shutdown_notify(struct notifier_block *unused1,
                                     unsigned long unused2, void *unused3)
{
        int err;

        err = jailhouse_cmd_disable();
        if (err && err != -EINVAL)
                pr_emerg("jailhouse: ordered shutdown failed!\n");

        return NOTIFY_DONE;
}

static struct notifier_block jailhouse_shutdown_nb = {
        .notifier_call = jailhouse_shutdown_notify,
};

static int __init jailhouse_init(void)
{
        int err;

        jailhouse_dev = root_device_register("jailhouse");
        if (IS_ERR(jailhouse_dev))
                return PTR_ERR(jailhouse_dev);

        err = jailhouse_sysfs_init(jailhouse_dev);
        if (err)
                goto unreg_dev;

        err = misc_register(&jailhouse_misc_dev);
        if (err)
                goto exit_sysfs;

        register_reboot_notifier(&jailhouse_shutdown_nb);

        init_hypercall();

        return 0;

exit_sysfs:
        jailhouse_sysfs_exit(jailhouse_dev);

unreg_dev:
        root_device_unregister(jailhouse_dev);
        return err;
}

static void __exit jailhouse_exit(void)
{
        unregister_reboot_notifier(&jailhouse_shutdown_nb);
        misc_deregister(&jailhouse_misc_dev);
        jailhouse_sysfs_exit(jailhouse_dev);
        root_device_unregister(jailhouse_dev);
}

module_init(jailhouse_init);
module_exit(jailhouse_exit);