/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2014
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
13 #include <jailhouse/control.h>
14 #include <jailhouse/mmio.h>
15 #include <jailhouse/printk.h>
17 #include <asm/ioapic.h>
18 #include <asm/iommu.h>
19 #include <asm/spinlock.h>
21 #include <jailhouse/cell-config.h>
/* Number of cell_ioapic descriptors that fit into one page. */
#define IOAPIC_MAX_CHIPS	(PAGE_SIZE / sizeof(struct cell_ioapic))

/* MMIO offsets of the indirect register window (index/data) and EOI. */
#define IOAPIC_REG_INDEX	0x00
#define IOAPIC_REG_DATA		0x10
#define IOAPIC_REG_EOI		0x40
/* Indirect register indexes. */
#define IOAPIC_ID		0x00
#define IOAPIC_VER		0x01
#define IOAPIC_REDIR_TBL_START	0x10
#define IOAPIC_REDIR_TBL_END	0x3f
/* Mask bit in the low 32 bits of a redirection entry. */
# define IOAPIC_REDIR_MASK	(1 << 16)

/* Pin state requested when handing an I/O APIC over to another owner. */
enum ioapic_handover {PINS_ACTIVE, PINS_MASKED};
/* Iterate over all registered physical I/O APICs. */
#define for_each_phys_ioapic(ioapic, counter)			\
	for ((ioapic) = &phys_ioapics[0], (counter) = 0;	\
	     (counter) < num_phys_ioapics;			\
	     (ioapic)++, (counter)++)
/* Iterate over all I/O APICs assigned to the given cell. */
#define for_each_cell_ioapic(ioapic, cell, counter)		\
	for ((ioapic) = (cell)->arch.ioapics, (counter) = 0;	\
	     (counter) < (cell)->arch.num_ioapics;		\
	     (ioapic)++, (counter)++)
46 static struct phys_ioapic phys_ioapics[IOAPIC_MAX_CHIPS];
47 static unsigned int num_phys_ioapics;
49 static u32 ioapic_reg_read(struct phys_ioapic *ioapic, unsigned int reg)
53 spin_lock(&ioapic->lock);
55 mmio_write32(ioapic->reg_base + IOAPIC_REG_INDEX, reg);
56 value = mmio_read32(ioapic->reg_base + IOAPIC_REG_DATA);
58 spin_unlock(&ioapic->lock);
63 static void ioapic_reg_write(struct phys_ioapic *ioapic, unsigned int reg,
66 spin_lock(&ioapic->lock);
68 mmio_write32(ioapic->reg_base + IOAPIC_REG_INDEX, reg);
69 mmio_write32(ioapic->reg_base + IOAPIC_REG_DATA, value);
71 spin_unlock(&ioapic->lock);
74 static struct apic_irq_message
75 ioapic_translate_redir_entry(struct cell_ioapic *ioapic, unsigned int pin,
76 union ioapic_redir_entry entry)
78 struct apic_irq_message irq_msg = { .valid = 0 };
79 unsigned int idx, ioapic_id;
81 if (iommu_cell_emulates_ir(ioapic->cell)) {
82 if (!entry.remap.remapped)
85 idx = entry.remap.int_index | (entry.remap.int_index15 << 15);
86 ioapic_id = ioapic->info->id;
88 return iommu_get_remapped_root_int(ioapic_id >> 16,
89 (u16)ioapic_id, pin, idx);
92 irq_msg.vector = entry.native.vector;
93 irq_msg.delivery_mode = entry.native.delivery_mode;
94 irq_msg.level_triggered = entry.native.level_triggered;
95 irq_msg.dest_logical = entry.native.dest_logical;
96 /* align redir_hint and dest_logical - required by iommu_map_interrupt */
97 irq_msg.redir_hint = irq_msg.dest_logical;
99 irq_msg.destination = entry.native.destination;
104 static int ioapic_virt_redir_write(struct cell_ioapic *ioapic,
105 unsigned int reg, u32 value)
107 unsigned int pin = (reg - IOAPIC_REDIR_TBL_START) / 2;
108 struct phys_ioapic *phys_ioapic = ioapic->phys_ioapic;
109 struct apic_irq_message irq_msg;
110 union ioapic_redir_entry entry;
113 entry = phys_ioapic->shadow_redir_table[pin];
114 entry.raw[reg & 1] = value;
115 phys_ioapic->shadow_redir_table[pin] = entry;
117 /* Do not map the interrupt while masked. */
118 if (entry.native.mask) {
120 * The mask is part of the lower 32 bits. Apply it when that
121 * register half is written.
124 ioapic_reg_write(phys_ioapic, reg, IOAPIC_REDIR_MASK);
128 irq_msg = ioapic_translate_redir_entry(ioapic, pin, entry);
130 result = iommu_map_interrupt(ioapic->cell, (u16)ioapic->info->id, pin,
133 if (result == -ENOSYS) {
135 * Upper 32 bits aren't written when the register is masked.
136 * Write them unconditionally when unmasking to keep an entry
137 * in the consistent state.
139 ioapic_reg_write(phys_ioapic, reg | 1, entry.raw[1]);
140 ioapic_reg_write(phys_ioapic, reg, entry.raw[reg & 1]);
146 entry.remap.zero = 0;
147 entry.remap.int_index15 = result >> 15;
148 entry.remap.remapped = 1;
149 entry.remap.int_index = result;
150 ioapic_reg_write(phys_ioapic, reg, entry.raw[reg & 1]);
155 static void ioapic_mask_cell_pins(struct cell_ioapic *ioapic,
156 enum ioapic_handover handover)
158 struct phys_ioapic *phys_ioapic = ioapic->phys_ioapic;
159 struct apic_irq_message irq_msg;
160 union ioapic_redir_entry entry;
161 unsigned int pin, reg;
163 for (pin = 0; pin < IOAPIC_NUM_PINS; pin++) {
164 if (!(ioapic->pin_bitmap & (1UL << pin)))
167 reg = IOAPIC_REDIR_TBL_START + pin * 2;
169 entry.raw[0] = ioapic_reg_read(phys_ioapic, reg);
170 if (entry.remap.mask)
173 ioapic_reg_write(phys_ioapic, reg, IOAPIC_REDIR_MASK);
175 if (handover == PINS_MASKED) {
176 phys_ioapic->shadow_redir_table[pin].native.mask = 1;
177 } else if (!entry.native.level_triggered) {
179 * Inject edge-triggered interrupts to avoid losing
180 * events while masked. Linux can handle rare spurious
183 entry = phys_ioapic->shadow_redir_table[pin];
184 irq_msg = ioapic_translate_redir_entry(ioapic, pin,
187 apic_send_irq(irq_msg);
192 int ioapic_init(void)
196 err = ioapic_cell_init(&root_cell);
200 ioapic_prepare_handover();
205 void ioapic_prepare_handover(void)
207 enum ioapic_handover handover;
208 struct cell_ioapic *ioapic;
212 for_each_cell(cell) {
213 handover = (cell == &root_cell) ? PINS_ACTIVE : PINS_MASKED;
214 for_each_cell_ioapic(ioapic, cell, n)
215 ioapic_mask_cell_pins(ioapic, handover);
219 static struct phys_ioapic *
220 ioapic_get_or_add_phys(const struct jailhouse_irqchip *irqchip)
222 struct phys_ioapic *phys_ioapic;
223 unsigned int n, index;
226 for_each_phys_ioapic(phys_ioapic, n)
227 if (phys_ioapic->base_addr == irqchip->address)
230 if (num_phys_ioapics == IOAPIC_MAX_CHIPS)
231 return trace_error(NULL);
233 phys_ioapic->reg_base = page_alloc(&remap_pool, 1);
234 if (!phys_ioapic->reg_base)
235 return trace_error(NULL);
236 err = paging_create(&hv_paging_structs, irqchip->address, PAGE_SIZE,
237 (unsigned long)phys_ioapic->reg_base,
238 PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE,
239 PAGING_NON_COHERENT);
241 page_free(&remap_pool, phys_ioapic->reg_base, 1);
245 phys_ioapic->base_addr = irqchip->address;
248 for (index = 0; index < IOAPIC_NUM_PINS * 2; index++)
249 phys_ioapic->shadow_redir_table[index / 2].raw[index % 2] =
250 ioapic_reg_read(phys_ioapic,
251 IOAPIC_REDIR_TBL_START + index);
256 static struct cell_ioapic *ioapic_find_by_address(struct cell *cell,
257 unsigned long address)
259 struct cell_ioapic *ioapic;
262 for_each_cell_ioapic(ioapic, cell, n) {
263 unsigned long base = ioapic->info->address;
265 if (address >= base && address < base + PAGE_SIZE)
/**
 * ioapic_access_handler() - Handler for accesses to IOAPIC
 * @cell:	Request issuing cell
 * @is_write:	True if write access
 * @addr:	Address accessed
 * @value:	Pointer to value for reading/writing
 *
 * Return: 1 if handled successfully, 0 if unhandled, -1 on access error
 */
280 int ioapic_access_handler(struct cell *cell, bool is_write, u64 addr,
283 union ioapic_redir_entry *shadow_table;
284 struct cell_ioapic *ioapic;
287 ioapic = ioapic_find_by_address(cell, addr);
291 switch (addr - ioapic->info->address) {
292 case IOAPIC_REG_INDEX:
294 ioapic->index_reg_val = *value;
296 *value = ioapic->index_reg_val;
298 case IOAPIC_REG_DATA:
299 index = ioapic->index_reg_val;
301 if (index == IOAPIC_ID || index == IOAPIC_VER) {
304 *value = ioapic_reg_read(ioapic->phys_ioapic, index);
308 if (index < IOAPIC_REDIR_TBL_START ||
309 index > IOAPIC_REDIR_TBL_END)
312 entry = (index - IOAPIC_REDIR_TBL_START) / 2;
313 if ((ioapic->pin_bitmap & (1UL << entry)) == 0)
317 if (ioapic_virt_redir_write(ioapic, index, *value) < 0)
320 index -= IOAPIC_REDIR_TBL_START;
321 shadow_table = ioapic->phys_ioapic->shadow_redir_table;
322 *value = shadow_table[index / 2].raw[index % 2];
326 if (!is_write || ioapic->pin_bitmap == 0)
329 * Just write the EOI if the cell has any assigned pin. It
330 * would be complex to virtualize it in a way that cells are
331 * unable to ack vectors of other cells. It is therefore not
332 * recommended to use level-triggered IOAPIC interrupts in
335 mmio_write32(ioapic->phys_ioapic->reg_base + IOAPIC_REG_EOI,
341 panic_printk("FATAL: Invalid IOAPIC %s, reg: %x, index: %x\n",
342 is_write ? "write" : "read", addr - ioapic->info->address,
343 ioapic->index_reg_val);
347 int ioapic_cell_init(struct cell *cell)
349 const struct jailhouse_irqchip *irqchip =
350 jailhouse_cell_irqchips(cell->config);
351 struct cell_ioapic *ioapic, *root_ioapic;
352 struct phys_ioapic *phys_ioapic;
355 if (cell->config->num_irqchips == 0)
357 if (cell->config->num_irqchips > IOAPIC_MAX_CHIPS)
358 return trace_error(-ERANGE);
360 cell->arch.ioapics = page_alloc(&mem_pool, 1);
361 if (!cell->arch.ioapics)
364 for (n = 0; n < cell->config->num_irqchips; n++, irqchip++) {
365 phys_ioapic = ioapic_get_or_add_phys(irqchip);
369 ioapic = &cell->arch.ioapics[n];
370 ioapic->info = irqchip;
372 ioapic->phys_ioapic = phys_ioapic;
373 ioapic->pin_bitmap = (u32)irqchip->pin_bitmap;
374 cell->arch.num_ioapics++;
376 if (cell != &root_cell) {
377 root_ioapic = ioapic_find_by_address(&root_cell,
380 root_ioapic->pin_bitmap &= ~ioapic->pin_bitmap;
381 ioapic_mask_cell_pins(ioapic, PINS_MASKED);
389 void ioapic_cell_exit(struct cell *cell)
391 struct cell_ioapic *ioapic, *root_ioapic;
394 for_each_cell_ioapic(ioapic, cell, n) {
395 ioapic_mask_cell_pins(ioapic, PINS_MASKED);
397 root_ioapic = ioapic_find_by_address(&root_cell,
398 ioapic->info->address);
400 root_ioapic->pin_bitmap |=
402 root_ioapic->info->pin_bitmap;
405 page_free(&mem_pool, cell->arch.ioapics, 1);
408 void ioapic_config_commit(struct cell *cell_added_removed)
410 union ioapic_redir_entry entry;
411 struct cell_ioapic *ioapic;
412 unsigned int pin, reg, n;
414 if (!cell_added_removed)
417 for_each_cell_ioapic(ioapic, &root_cell, n)
418 for (pin = 0; pin < IOAPIC_NUM_PINS; pin++) {
419 if (!(ioapic->pin_bitmap & (1UL << pin)))
422 entry = ioapic->phys_ioapic->shadow_redir_table[pin];
423 reg = IOAPIC_REDIR_TBL_START + pin * 2;
425 /* write high word first to preserve mask initially */
426 if (ioapic_virt_redir_write(ioapic, reg + 1,
428 ioapic_virt_redir_write(ioapic, reg,
430 panic_printk("FATAL: Unsupported IOAPIC "
431 "state, pin %d\n", pin);
437 void ioapic_shutdown(void)
439 union ioapic_redir_entry *shadow_table;
440 struct phys_ioapic *phys_ioapic;
444 for_each_phys_ioapic(phys_ioapic, n) {
445 shadow_table = phys_ioapic->shadow_redir_table;
447 /* write in reverse order to preserve the mask as long as
449 for (index = IOAPIC_NUM_PINS * 2 - 1; index >= 0; index--)
450 ioapic_reg_write(phys_ioapic,
451 IOAPIC_REDIR_TBL_START + index,
452 shadow_table[index / 2].raw[index % 2]);