/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2015
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include <jailhouse/cell.h>
#include <jailhouse/mmio.h>
#include <jailhouse/paging.h>
#include <jailhouse/printk.h>
#include <asm/percpu.h>
/**
 * Perform MMIO-specific initialization for a new cell.
 * @param cell		Cell to be initialized.
 *
 * @return 0 on success, negative error code otherwise.
 *
 * @see mmio_cell_exit
 */
27 int mmio_cell_init(struct cell *cell)
31 cell->max_mmio_regions = arch_mmio_count_regions(cell);
33 pages = page_alloc(&mem_pool,
34 PAGES(cell->max_mmio_regions *
35 (sizeof(struct mmio_region_location) +
36 sizeof(struct mmio_region_handler))));
40 cell->mmio_locations = pages;
41 cell->mmio_handlers = pages +
42 cell->max_mmio_regions * sizeof(struct mmio_region_location);
47 static void copy_region(struct cell *cell, unsigned int src, unsigned dst)
50 * Invalidate destination region by shrinking it to size 0. This has to
51 * be made visible to other CPUs via a memory barrier before
52 * manipulating other destination fields.
54 cell->mmio_locations[dst].size = 0;
57 cell->mmio_locations[dst].start = cell->mmio_locations[src].start;
58 cell->mmio_handlers[dst] = cell->mmio_handlers[src];
59 /* Ensure all fields are committed before activating the region. */
62 cell->mmio_locations[dst].size = cell->mmio_locations[src].size;
/**
 * Register a MMIO region access handler for a cell.
 * @param cell		Cell that can access the region.
 * @param start		Region start address in cell address space.
 * @param size		Region size.
 * @param handler	Access handler.
 * @param handler_arg	Opaque argument to pass to handler.
 *
 * @see mmio_region_unregister
 */
75 void mmio_region_register(struct cell *cell, unsigned long start,
76 unsigned long size, mmio_handler handler,
79 unsigned int index, n;
81 spin_lock(&cell->mmio_region_lock);
83 if (cell->num_mmio_regions >= cell->max_mmio_regions) {
84 spin_unlock(&cell->mmio_region_lock);
86 printk("WARNING: Overflow during MMIO region registration!\n");
90 for (index = 0; index < cell->num_mmio_regions; index++)
91 if (cell->mmio_locations[index].start > start)
95 * Set and commit a dummy region at the end if the list so that
96 * we can safely grow it.
98 cell->mmio_locations[cell->num_mmio_regions].start = -1;
99 cell->mmio_locations[cell->num_mmio_regions].size = 0;
103 * Extend region list by one so that we can start moving entries.
104 * Commit this change via a barrier so that the current last element
105 * will remain visible when moving it up.
107 cell->num_mmio_regions++;
110 for (n = cell->num_mmio_regions - 1; n > index; n--)
111 copy_region(cell, n - 1, n);
113 /* Invalidate the new region entry first (see also copy_region()). */
114 cell->mmio_locations[index].size = 0;
117 cell->mmio_locations[index].start = start;
118 cell->mmio_handlers[index].handler = handler;
119 cell->mmio_handlers[index].arg = handler_arg;
120 /* Ensure all fields are committed before activating the region. */
123 cell->mmio_locations[index].size = size;
125 spin_unlock(&cell->mmio_region_lock);
128 static int find_region(struct cell *cell, unsigned long address,
131 unsigned int range_start = 0;
132 unsigned int range_size = cell->num_mmio_regions;
133 struct mmio_region_location region;
136 while (range_size > 0) {
137 index = range_start + range_size / 2;
138 region = cell->mmio_locations[index];
140 if (address < region.start) {
141 range_size = index - range_start;
142 } else if (region.start + region.size < address + size) {
143 range_size -= index + 1 - range_start;
144 range_start = index + 1;
/**
 * Unregister MMIO region from a cell.
 * @param cell		Cell the region belongs to.
 * @param start		Region start address as it was passed to
 *			mmio_region_register().
 *
 * @see mmio_region_register
 */
160 void mmio_region_unregister(struct cell *cell, unsigned long start)
164 spin_lock(&cell->mmio_region_lock);
166 index = find_region(cell, start, 0);
168 for (/* empty */; index < cell->num_mmio_regions; index++)
169 copy_region(cell, index + 1, index);
172 * Ensure the last region move is visible before shrinking the
177 cell->num_mmio_regions--;
179 spin_unlock(&cell->mmio_region_lock);
/**
 * Dispatch MMIO access of a cell CPU.
 * @param mmio		MMIO access description. @a mmio->value will receive the
 *			result of a successful read access. All @a mmio fields
 *			may have been modified on return.
 *
 * @return MMIO_HANDLED on success, MMIO_UNHANDLED if no region is registered
 * for the access address and size, or MMIO_ERROR if an access error was
 * detected.
 *
 * @see mmio_region_register
 * @see mmio_region_unregister
 */
195 enum mmio_result mmio_handle_access(struct mmio_access *mmio)
197 struct cell *cell = this_cell();
198 int index = find_region(cell, mmio->address, mmio->size);
199 mmio_handler handler;
202 return MMIO_UNHANDLED;
204 handler = cell->mmio_handlers[index].handler;
205 mmio->address -= cell->mmio_locations[index].start;
206 return handler(cell->mmio_handlers[index].arg, mmio);
/**
 * Perform MMIO-specific cleanup for a cell under destruction.
 * @param cell		Cell to be destructed.
 *
 * @see mmio_cell_init
 */
215 void mmio_cell_exit(struct cell *cell)
217 page_free(&mem_pool, cell->mmio_locations,
218 PAGES(cell->max_mmio_regions *
219 (sizeof(struct mmio_region_location) +
220 sizeof(struct mmio_region_handler))));