Merge remote-tracking branch 'kiszka/master'
[jailhouse.git] / hypervisor / mmio.c
index a64093ca3e9dcaa9f1dce0c27dccbcb0e9d5c125..30259ee73be421dba223dc0197ad53d6370fec0c 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Jailhouse, a Linux-based partitioning hypervisor
  *
- * Copyright (c) Siemens AG, 2015
+ * Copyright (c) Siemens AG, 2015, 2016
  *
  * Authors:
  *  Jan Kiszka <jan.kiszka@siemens.com>
@@ -11,6 +11,7 @@
  */
 
 #include <jailhouse/cell.h>
+#include <jailhouse/control.h>
 #include <jailhouse/mmio.h>
 #include <jailhouse/paging.h>
 #include <jailhouse/printk.h>
  */
 int mmio_cell_init(struct cell *cell)
 {
+       const struct jailhouse_memory *mem;
+       unsigned int n;
        void *pages;
 
        cell->max_mmio_regions = arch_mmio_count_regions(cell);
 
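+       /* each sub-page region is dispatched via an MMIO handler slot */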
+       for_each_mem_region(mem, cell->config, n)
+               if (JAILHOUSE_MEMORY_IS_SUBPAGE(mem))
+                       cell->max_mmio_regions++;
+
        pages = page_alloc(&mem_pool,
                           PAGES(cell->max_mmio_regions *
                                 (sizeof(struct mmio_region_location) +
@@ -92,7 +99,7 @@ void mmio_region_register(struct cell *cell, unsigned long start,
                        break;
 
        /*
-        * Set and commit a dummy region at the end if the list so that
+        * Set and commit a dummy region at the end of the list so that
         * we can safely grow it.
         */
        cell->mmio_locations[cell->num_mmio_regions].start = -1;
@@ -219,3 +226,107 @@ void mmio_cell_exit(struct cell *cell)
                        (sizeof(struct mmio_region_location) +
                         sizeof(struct mmio_region_handler))));
 }
+
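+/*
+ * Perform the access described by @mmio at the virtual address
+ * base + mmio->address. Access sizes other than 1, 2, 4 (and 8 on
+ * 64-bit builds) are silently ignored: writes are dropped, and reads
+ * leave mmio->value unchanged.
+ */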
+void mmio_perform_access(void *base, struct mmio_access *mmio)
+{
+       void *addr = base + mmio->address;
+
+       if (mmio->is_write)
+               switch (mmio->size) {
+               case 1:
+                       mmio_write8(addr, mmio->value);
+                       break;
+               case 2:
+                       mmio_write16(addr, mmio->value);
+                       break;
+               case 4:
+                       mmio_write32(addr, mmio->value);
+                       break;
+#if BITS_PER_LONG == 64
+               case 8:
+                       mmio_write64(addr, mmio->value);
+                       break;
+#endif
+               }
+       else
+               switch (mmio->size) {
+               case 1:
+                       mmio->value = mmio_read8(addr);
+                       break;
+               case 2:
+                       mmio->value = mmio_read16(addr);
+                       break;
+               case 4:
+                       mmio->value = mmio_read32(addr);
+                       break;
+#if BITS_PER_LONG == 64
+               case 8:
+                       mmio->value = mmio_read64(addr);
+                       break;
+#endif
+               }
+}
+
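+/*
+ * Handler for regions that cannot be mapped into the cell directly:
+ * checks the access against the region's permission, width and
+ * alignment flags, then performs it through a temporary per-CPU
+ * mapping of the target page.
+ */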
+static enum mmio_result mmio_handle_subpage(void *arg, struct mmio_access *mmio)
+{
+       const struct jailhouse_memory *mem = arg;
+       u64 perm = mmio->is_write ? JAILHOUSE_MEM_WRITE : JAILHOUSE_MEM_READ;
+       unsigned long page_virt = TEMPORARY_MAPPING_BASE +
+               this_cpu_id() * PAGE_SIZE * NUM_TEMPORARY_PAGES;
+       unsigned long page_phys =
+               ((unsigned long)mem->phys_start + mmio->address) & PAGE_MASK;
+       unsigned long virt_base;
+       int err;
+
+       /* check read/write access permissions */
+       if (!(mem->flags & perm))
+               goto invalid_access;
+
+       /* the width bit matching the access size must be set in the flags */
+       if (!((mmio->size << JAILHOUSE_MEM_IO_WIDTH_SHIFT) & mem->flags))
+               goto invalid_access;
+
+       /* a misaligned access must be allowed explicitly via the region flags */
+       if (mmio->address & (mmio->size - 1) &&
+           !(mem->flags & JAILHOUSE_MEM_IO_UNALIGNED))
+               goto invalid_access;
+
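+       /*
+        * Map the physical page containing the access into this CPU's
+        * temporary mapping slot in the hypervisor address space.
+        */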
+       err = paging_create(&hv_paging_structs, page_phys, PAGE_SIZE,
+                           page_virt, PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE,
+                           PAGING_NON_COHERENT);
+       if (err)
+               goto invalid_access;
+
+       /*
+        * This virt_base gives the following effective virtual address in
+        * mmio_perform_access:
+        *
+        *     page_virt + (mem->phys_start & ~PAGE_MASK) +
+        *         (mmio->address & ~PAGE_MASK)
+        *
+        * Reason: mmio_perform_access does addr = base + mmio->address.
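+        *
+        * Worked example (hypothetical numbers, PAGE_SIZE 0x1000): for
+        * mem->phys_start = 0x70006040 and mmio->address = 0x8, page_phys
+        * is 0x70006000, so the access resolves to page_virt + 0x48,
+        * the same page offset as physical address 0x70006048.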
+        */
+       virt_base = page_virt + (mem->phys_start & ~PAGE_MASK) -
+               (mmio->address & PAGE_MASK);
+       mmio_perform_access((void *)virt_base, mmio);
+       return MMIO_HANDLED;
+
+invalid_access:
+       panic_printk("FATAL: Invalid MMIO %s, address: %llx, size: %x\n",
+                    mmio->is_write ? "write" : "read",
+                    mem->phys_start + mmio->address, mmio->size);
+       return MMIO_ERROR;
+}
+
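+/*
+ * Register the sub-page handler for a memory region whose start or size
+ * is not page-aligned and which therefore cannot be mapped directly.
+ */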
+int mmio_subpage_register(struct cell *cell, const struct jailhouse_memory *mem)
+{
+       mmio_region_register(cell, mem->virt_start, mem->size,
+                            mmio_handle_subpage, (void *)mem);
+       return 0;
+}
+
+void mmio_subpage_unregister(struct cell *cell,
+                            const struct jailhouse_memory *mem)
+{
+       mmio_region_unregister(cell, mem->virt_start);
+}
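
For context, mmio_handle_subpage is reached for regions whose virt_start or size is not page-aligned and which therefore cannot be mapped into the cell directly. A minimal sketch of a matching cell-config entry follows; the device and addresses are hypothetical, and the flag macros are assumed to be the JAILHOUSE_MEM_* definitions from cell-config.h that the handler tests above:

	/* hypothetical 0x40-byte UART window, byte and 32-bit access only */
	{
		.phys_start = 0x70006000,
		.virt_start = 0x70006000,
		.size = 0x40, /* sub-page: caught by JAILHOUSE_MEMORY_IS_SUBPAGE() */
		.flags = JAILHOUSE_MEM_READ | JAILHOUSE_MEM_WRITE |
			 JAILHOUSE_MEM_IO | JAILHOUSE_MEM_IO_8 |
			 JAILHOUSE_MEM_IO_32,
	},

With these flags, aligned 1- and 4-byte accesses are forwarded by mmio_perform_access; any other width, or a misaligned access without JAILHOUSE_MEM_IO_UNALIGNED, takes the invalid_access path.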