/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2015, 2016
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <jailhouse/cell.h>
#include <jailhouse/control.h>
#include <jailhouse/mmio.h>
#include <jailhouse/paging.h>
#include <jailhouse/printk.h>
#include <asm/percpu.h>

/**
 * Perform MMIO-specific initialization for a new cell.
 * @param cell	Cell to be initialized.
 *
 * @return 0 on success, negative error code otherwise.
 *
 * @see mmio_cell_exit
 */
int mmio_cell_init(struct cell *cell)
{
	const struct jailhouse_memory *mem;
	unsigned int n;
	void *pages;

	cell->max_mmio_regions = arch_mmio_count_regions(cell);

	for_each_mem_region(mem, cell->config, n)
		if (JAILHOUSE_MEMORY_IS_SUBPAGE(mem))
			cell->max_mmio_regions++;

	pages = page_alloc(&mem_pool,
			   PAGES(cell->max_mmio_regions *
				 (sizeof(struct mmio_region_location) +
				  sizeof(struct mmio_region_handler))));
	if (!pages)
		return -ENOMEM;

	cell->mmio_locations = pages;
	cell->mmio_handlers = pages +
		cell->max_mmio_regions * sizeof(struct mmio_region_location);

	return 0;
}
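
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the single allocation above carries both per-region arrays back to back.
 * Assuming max_mmio_regions == n, the layout is
 *
 *	pages:                                   mmio_locations[0..n-1]
 *	pages + n * sizeof(mmio_region_location): mmio_handlers[0..n-1]
 *
 * which presumably keeps the sorted location array dense for the binary
 * search in find_region() below.
 */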

static void copy_region(struct cell *cell, unsigned int src, unsigned int dst)
{
	/*
	 * Invalidate destination region by shrinking it to size 0. This has to
	 * be made visible to other CPUs via a memory barrier before
	 * manipulating other destination fields.
	 */
	cell->mmio_locations[dst].size = 0;
	memory_barrier();

	cell->mmio_locations[dst].start = cell->mmio_locations[src].start;
	cell->mmio_handlers[dst] = cell->mmio_handlers[src];
	/* Ensure all fields are committed before activating the region. */
	memory_barrier();

	cell->mmio_locations[dst].size = cell->mmio_locations[src].size;
}
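
/*
 * Editor's note (added): the invalidate/fill/activate sequence above keeps
 * lock-free readers safe. A concurrent find_region() (which runs without
 * taking mmio_region_lock) either observes size == 0, in which case the
 * entry matches no access, or a fully populated entry; it can never dispatch
 * on a half-copied one.
 */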

/**
 * Register an MMIO region access handler for a cell.
 * @param cell		Cell that can access the region.
 * @param start		Region start address in cell address space.
 * @param size		Region size.
 * @param handler	Access handler.
 * @param handler_arg	Opaque argument to pass to handler.
 *
 * @see mmio_region_unregister
 */
void mmio_region_register(struct cell *cell, unsigned long start,
			  unsigned long size, mmio_handler handler,
			  void *handler_arg)
{
	unsigned int index, n;

	spin_lock(&cell->mmio_region_lock);

	if (cell->num_mmio_regions >= cell->max_mmio_regions) {
		spin_unlock(&cell->mmio_region_lock);

		printk("WARNING: Overflow during MMIO region registration!\n");
		return;
	}

	for (index = 0; index < cell->num_mmio_regions; index++)
		if (cell->mmio_locations[index].start > start)
			break;

	/*
	 * Set and commit a dummy region at the end of the list so that
	 * we can safely grow it.
	 */
	cell->mmio_locations[cell->num_mmio_regions].start = -1;
	cell->mmio_locations[cell->num_mmio_regions].size = 0;
	memory_barrier();

	/*
	 * Extend region list by one so that we can start moving entries.
	 * Commit this change via a barrier so that the current last element
	 * will remain visible when moving it up.
	 */
	cell->num_mmio_regions++;
	memory_barrier();

	for (n = cell->num_mmio_regions - 1; n > index; n--)
		copy_region(cell, n - 1, n);

	/* Invalidate the new region entry first (see also copy_region()). */
	cell->mmio_locations[index].size = 0;
	memory_barrier();

	cell->mmio_locations[index].start = start;
	cell->mmio_handlers[index].handler = handler;
	cell->mmio_handlers[index].arg = handler_arg;
	/* Ensure all fields are committed before activating the region. */
	memory_barrier();

	cell->mmio_locations[index].size = size;

	spin_unlock(&cell->mmio_region_lock);
}
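
/*
 * Usage sketch (editor's addition, hypothetical names and addresses): a
 * trivial handler and its registration. Note that mmio->address arrives
 * rebased to the region offset, see mmio_handle_access().
 *
 *	static enum mmio_result dummy_uart_access(void *arg,
 *						  struct mmio_access *mmio)
 *	{
 *		if (!mmio->is_write)
 *			mmio->value = 0;
 *		return MMIO_HANDLED;
 *	}
 *
 *	mmio_region_register(cell, 0xf000b000, 0x1000,
 *			     dummy_uart_access, NULL);
 */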

static int find_region(struct cell *cell, unsigned long address,
		       unsigned int size)
{
	unsigned int range_start = 0;
	unsigned int range_size = cell->num_mmio_regions;
	struct mmio_region_location region;
	int index;

	while (range_size > 0) {
		index = range_start + range_size / 2;
		region = cell->mmio_locations[index];

		if (address < region.start) {
			range_size = index - range_start;
		} else if (region.start + region.size < address + size) {
			range_size -= index + 1 - range_start;
			range_start = index + 1;
		} else {
			return index;
		}
	}
	return -1;
}
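
/*
 * Worked example (editor's addition, hypothetical regions): given the sorted
 * list {start, size} = {0x1000, 0x100}, {0x3000, 0x10}, {0x8000, 0x1000} and
 * a lookup of address 0x8010 with size 4:
 *
 *	1st pass: index = 1, region = {0x3000, 0x10};
 *		  0x3010 < 0x8014, so search the upper half:
 *		  range_start = 2, range_size = 1
 *	2nd pass: index = 2, region = {0x8000, 0x1000};
 *		  0x8000 <= 0x8010 and 0x8010 + 4 <= 0x9000, return 2
 *
 * A region only matches if it fully covers [address, address + size).
 */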

/**
 * Unregister an MMIO region from a cell.
 * @param cell		Cell the region belongs to.
 * @param start		Region start address as it was passed to
 *			mmio_region_register().
 *
 * @see mmio_region_register
 */
void mmio_region_unregister(struct cell *cell, unsigned long start)
{
	int index;

	spin_lock(&cell->mmio_region_lock);

	index = find_region(cell, start, 0);
	if (index >= 0) {
		for (/* empty */; index < cell->num_mmio_regions; index++)
			copy_region(cell, index + 1, index);

		/*
		 * Ensure the last region move is visible before shrinking the
		 * list.
		 */
		memory_barrier();

		cell->num_mmio_regions--;
	}
	spin_unlock(&cell->mmio_region_lock);
}

/**
 * Dispatch MMIO access of a cell CPU.
 * @param mmio		MMIO access description. @a mmio->value will receive the
 *			result of a successful read access. All @a mmio fields
 *			may have been modified on return.
 *
 * @return MMIO_HANDLED on success, MMIO_UNHANDLED if no region is registered
 *	   for the access address and size, or MMIO_ERROR if an access error
 *	   was detected.
 *
 * @see mmio_region_register
 * @see mmio_region_unregister
 */
enum mmio_result mmio_handle_access(struct mmio_access *mmio)
{
	struct cell *cell = this_cell();
	int index = find_region(cell, mmio->address, mmio->size);
	mmio_handler handler;

	if (index < 0)
		return MMIO_UNHANDLED;

	handler = cell->mmio_handlers[index].handler;
	mmio->address -= cell->mmio_locations[index].start;
	return handler(cell->mmio_handlers[index].arg, mmio);
}
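
/*
 * Editor's note (added): because mmio->address is rewritten before dispatch,
 * handlers see the offset into their region rather than the absolute guest
 * address. With a region registered at 0xf000b000 (hypothetical), a guest
 * access to 0xf000b010 reaches the handler with mmio->address == 0x10.
 */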

/**
 * Perform MMIO-specific cleanup for a cell under destruction.
 * @param cell	Cell to be destroyed.
 *
 * @see mmio_cell_init
 */
void mmio_cell_exit(struct cell *cell)
{
	page_free(&mem_pool, cell->mmio_locations,
		  PAGES(cell->max_mmio_regions *
			(sizeof(struct mmio_region_location) +
			 sizeof(struct mmio_region_handler))));
}

void mmio_perform_access(void *base, struct mmio_access *mmio)
{
	void *addr = base + mmio->address;

	if (mmio->is_write)
		switch (mmio->size) {
		case 1:
			mmio_write8(addr, mmio->value);
			break;
		case 2:
			mmio_write16(addr, mmio->value);
			break;
		case 4:
			mmio_write32(addr, mmio->value);
			break;
#if BITS_PER_LONG == 64
		case 8:
			mmio_write64(addr, mmio->value);
			break;
#endif
		}
	else
		switch (mmio->size) {
		case 1:
			mmio->value = mmio_read8(addr);
			break;
		case 2:
			mmio->value = mmio_read16(addr);
			break;
		case 4:
			mmio->value = mmio_read32(addr);
			break;
#if BITS_PER_LONG == 64
		case 8:
			mmio->value = mmio_read64(addr);
			break;
#endif
		}
}
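
/*
 * Usage sketch (editor's addition, hypothetical): a handler that only wants
 * to forward the trapped access to a hypervisor mapping of the device can
 * reuse mmio_perform_access():
 *
 *	static enum mmio_result forward_access(void *arg,
 *					       struct mmio_access *mmio)
 *	{
 *		void *mapped_base = arg;
 *
 *		mmio_perform_access(mapped_base, mmio);
 *		return MMIO_HANDLED;
 *	}
 *
 * mmio_handle_subpage() below follows this pattern after validating the
 * access and creating a temporary mapping.
 */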

static enum mmio_result mmio_handle_subpage(void *arg, struct mmio_access *mmio)
{
	const struct jailhouse_memory *mem = arg;
	u64 perm = mmio->is_write ? JAILHOUSE_MEM_WRITE : JAILHOUSE_MEM_READ;
	unsigned long page_virt = TEMPORARY_MAPPING_BASE +
		this_cpu_id() * PAGE_SIZE * NUM_TEMPORARY_PAGES;
	unsigned long page_phys =
		((unsigned long)mem->phys_start + mmio->address) & PAGE_MASK;
	unsigned long virt_base;
	int err;

	/* check read/write access permissions */
	if (!(mem->flags & perm))
		goto invalid_access;

	/* width bit according to access size needs to be set */
	if (!((mmio->size << JAILHOUSE_MEM_IO_WIDTH_SHIFT) & mem->flags))
		goto invalid_access;

	/* naturally unaligned access needs to be allowed explicitly */
	if (mmio->address & (mmio->size - 1) &&
	    !(mem->flags & JAILHOUSE_MEM_IO_UNALIGNED))
		goto invalid_access;

	err = paging_create(&hv_paging_structs, page_phys, PAGE_SIZE,
			    page_virt, PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE,
			    PAGING_NON_COHERENT);
	if (err)
		goto invalid_access;

	/*
	 * This virt_base gives the following effective virtual address in
	 * mmio_perform_access:
	 *
	 *	page_virt + (mem->phys_start & ~PAGE_MASK) +
	 *		(mmio->address & ~PAGE_MASK)
	 *
	 * Reason: mmio_perform_access does addr = base + mmio->address.
	 */
	virt_base = page_virt + (mem->phys_start & ~PAGE_MASK) -
		(mmio->address & PAGE_MASK);
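	/*
	 * Worked example (editor's addition, hypothetical numbers, 4K pages):
	 * mem->phys_start = 0x10040 and mmio->address = 0x8 target physical
	 * address 0x10048. page_phys = 0x10000 is mapped at page_virt, so
	 *
	 *	virt_base = page_virt + 0x40 - 0x0
	 *	addr      = virt_base + 0x8 = page_virt + 0x48
	 *
	 * which is exactly the page offset of 0x10048 within the temporary
	 * mapping.
	 */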
	mmio_perform_access((void *)virt_base, mmio);
	return MMIO_HANDLED;

invalid_access:
	panic_printk("FATAL: Invalid MMIO %s, address: %llx, size: %x\n",
		     mmio->is_write ? "write" : "read",
		     mem->phys_start + mmio->address, mmio->size);
	return MMIO_ERROR;
}

int mmio_subpage_register(struct cell *cell, const struct jailhouse_memory *mem)
{
	mmio_region_register(cell, mem->virt_start, mem->size,
			     mmio_handle_subpage, (void *)mem);
	return 0;
}
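
/*
 * Editor's sketch (hypothetical values): a cell config memory entry that
 * would be routed through mmio_handle_subpage() because it does not fill a
 * whole page; only 32-bit, naturally aligned accesses would be permitted:
 *
 *	{
 *		.phys_start = 0x3f215040,
 *		.virt_start = 0x3f215040,
 *		.size = 0x40,
 *		.flags = JAILHOUSE_MEM_READ | JAILHOUSE_MEM_WRITE |
 *			 JAILHOUSE_MEM_IO | JAILHOUSE_MEM_IO_32,
 *	},
 */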

void mmio_subpage_unregister(struct cell *cell,
			     const struct jailhouse_memory *mem)
{
	mmio_region_unregister(cell, mem->virt_start);
}