/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013-2016
 * Copyright (c) Valentine Sinitsyn, 2014
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *  Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include <jailhouse/control.h>
#include <jailhouse/mmio.h>
#include <jailhouse/paging.h>
#include <jailhouse/pci.h>
#include <jailhouse/printk.h>
#include <jailhouse/string.h>
#include <asm/apic.h>
#include <asm/iommu.h>
#include <asm/bitops.h>
#include <asm/ioapic.h>
#include <asm/spinlock.h>
#include <asm/vtd.h>
/* A unit can occupy up to 3 pages for registers; we reserve 4. */
#define DMAR_MMIO_SIZE		(PAGE_SIZE * 4)
struct vtd_irte_usage {
	u16 device_id;
	u16 vector:10,
	    used:1;
} __attribute__((packed));
struct vtd_emulation {
	u64 irta;
	unsigned int irt_entries;
	struct vtd_irte_usage *irte_map;

	u64 iqa;
	u16 iqh;

	u32 fectl;
	u32 fedata;
	u32 feaddr, feuaddr;
};
static const struct vtd_entry inv_global_context = {
	.lo_word = VTD_REQ_INV_CONTEXT | VTD_INV_CONTEXT_GLOBAL,
};
static const struct vtd_entry inv_global_iotlb = {
	.lo_word = VTD_REQ_INV_IOTLB | VTD_INV_IOTLB_GLOBAL |
		VTD_INV_IOTLB_DW | VTD_INV_IOTLB_DR,
};
static const struct vtd_entry inv_global_int = {
	.lo_word = VTD_REQ_INV_INT | VTD_INV_INT_GLOBAL,
};
/* TODO: Support multiple segments */
static struct vtd_entry __attribute__((aligned(PAGE_SIZE)))
	root_entry_table[256];
static union vtd_irte *int_remap_table;
static unsigned int int_remap_table_size_log2;
static struct paging vtd_paging[VTD_MAX_PAGE_TABLE_LEVELS];
static void *dmar_reg_base;
static void *unit_inv_queue;
static unsigned int dmar_units;
static unsigned int dmar_pt_levels;
static unsigned int dmar_num_did = ~0U;
static unsigned int fault_reporting_cpu_id;
static DEFINE_SPINLOCK(inv_queue_lock);
static struct vtd_emulation root_cell_units[JAILHOUSE_MAX_IOMMU_UNITS];
static bool dmar_units_initialized;
unsigned int iommu_mmio_count_regions(struct cell *cell)
{
	return cell == &root_cell ? iommu_count_units() : 0;
}
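/*
 * Write one request into the given invalidation queue and return the index
 * of the next free slot. Each unit's queue is a single page, so the index
 * wraps after PAGE_SIZE / sizeof(struct vtd_entry) entries.
 */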
static unsigned int inv_queue_write(void *inv_queue, unsigned int index,
				    struct vtd_entry content)
{
	struct vtd_entry *entry = inv_queue;

	entry[index] = content;
	arch_paging_flush_cpu_caches(&entry[index], sizeof(*entry));

	return (index + 1) % (PAGE_SIZE / sizeof(*entry));
}
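/*
 * Queue a request (may be NULL, see vtd_restore_ir), followed by an
 * invalidation wait descriptor that instructs the unit to write a status
 * value to the physical address of "completed". Spinning on that variable
 * until the unit has written it makes the submission synchronous.
 */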
static void vtd_submit_iq_request(void *reg_base, void *inv_queue,
				  const struct vtd_entry *inv_request)
{
	volatile u32 completed = 0;
	struct vtd_entry inv_wait = {
		.lo_word = VTD_REQ_INV_WAIT | VTD_INV_WAIT_SW |
			VTD_INV_WAIT_FN | (1UL << VTD_INV_WAIT_SDATA_SHIFT),
		.hi_word = paging_hvirt2phys(&completed),
	};
	unsigned int index;

	spin_lock(&inv_queue_lock);

	index = mmio_read64_field(reg_base + VTD_IQT_REG, VTD_IQT_QT_MASK);

	if (inv_request)
		index = inv_queue_write(inv_queue, index, *inv_request);
	index = inv_queue_write(inv_queue, index, inv_wait);

	mmio_write64_field(reg_base + VTD_IQT_REG, VTD_IQT_QT_MASK, index);

	while (!completed)
		cpu_relax();

	spin_unlock(&inv_queue_lock);
}
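/* Flush the context cache and IOTLB entries of one domain on all units. */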
static void vtd_flush_domain_caches(unsigned int did)
{
	const struct vtd_entry inv_context = {
		.lo_word = VTD_REQ_INV_CONTEXT | VTD_INV_CONTEXT_DOMAIN |
			(did << VTD_INV_CONTEXT_DOMAIN_SHIFT),
	};
	const struct vtd_entry inv_iotlb = {
		.lo_word = VTD_REQ_INV_IOTLB | VTD_INV_IOTLB_DOMAIN |
			VTD_INV_IOTLB_DW | VTD_INV_IOTLB_DR |
			(did << VTD_INV_IOTLB_DOMAIN_SHIFT),
	};
	void *inv_queue = unit_inv_queue;
	void *reg_base = dmar_reg_base;
	unsigned int n;

	for (n = 0; n < dmar_units; n++) {
		vtd_submit_iq_request(reg_base, inv_queue, &inv_context);
		vtd_submit_iq_request(reg_base, inv_queue, &inv_iotlb);
		reg_base += DMAR_MMIO_SIZE;
		inv_queue += PAGE_SIZE;
	}
}
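/*
 * Set or clear a single control in VTD_GCMD_REG while preserving the other
 * enabled controls, then wait until the unit signals completion via
 * VTD_GSTS_REG, e.g. vtd_update_gcmd_reg(reg_base, VTD_GCMD_TE, 1) enables
 * DMA translation synchronously.
 */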
static void vtd_update_gcmd_reg(void *reg_base, u32 mask, unsigned int set)
{
	u32 val = mmio_read32(reg_base + VTD_GSTS_REG) & VTD_GSTS_USED_CTRLS;

	if (set)
		val |= mask;
	else
		val &= ~mask;
	mmio_write32(reg_base + VTD_GCMD_REG, val);

	/* Note: This test builds on the fact that the related bits sit at
	 * the same positions in VTD_GCMD_REG and VTD_GSTS_REG. */
	while ((mmio_read32(reg_base + VTD_GSTS_REG) & mask) != (val & mask))
		cpu_relax();
}
static void vtd_set_next_pt(pt_entry_t pte, unsigned long next_pt)
{
	*pte = (next_pt & BIT_MASK(51, 12)) | VTD_PAGE_READ | VTD_PAGE_WRITE;
}
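/*
 * Route the fault reporting interrupts of all units as NMI to one root-cell
 * CPU so that iommu_check_pending_faults can pick them up there.
 */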
static void vtd_init_fault_nmi(void)
{
	union x86_msi_vector msi = { .native.address = MSI_ADDRESS_VALUE };
	void *reg_base = dmar_reg_base;
	struct per_cpu *cpu_data;
	unsigned int n;

	/* This assumes that at least one bit is set somewhere because we
	 * don't support configurations where Linux is left with no CPUs. */
	for (n = 0; root_cell.cpu_set->bitmap[n] == 0; n++)
		/* Empty loop */;
	cpu_data = per_cpu(ffsl(root_cell.cpu_set->bitmap[n]));

	/* We only support 8-bit APIC IDs. */
	msi.native.destination = (u8)cpu_data->apic_id;

	/* Save this value globally to avoid multiple reports of the same
	 * case from different CPUs. */
	fault_reporting_cpu_id = cpu_data->cpu_id;

	for (n = 0; n < dmar_units; n++, reg_base += DMAR_MMIO_SIZE) {
		/* Mask events */
		mmio_write32_field(reg_base + VTD_FECTL_REG, VTD_FECTL_IM, 1);

		/*
		 * VT-d spec rev. 2.3 section 7.4 suggests that only reading
		 * back FSTS or FECTL ensures no interrupt messages are still
		 * in-flight when we change their destination below.
		 */
		mmio_read32(reg_base + VTD_FECTL_REG);

		/* Program MSI message to send NMIs to the target CPU */
		mmio_write32(reg_base + VTD_FEDATA_REG, MSI_DM_NMI);
		mmio_write32(reg_base + VTD_FEADDR_REG, (u32)msi.raw.address);
		mmio_write32(reg_base + VTD_FEUADDR_REG, 0);

		/* Unmask events */
		mmio_write32_field(reg_base + VTD_FECTL_REG, VTD_FECTL_IM, 0);
	}

	/*
	 * There is a race window between setting the new reporting CPU ID and
	 * updating the target programming in the register. If a fault hits us
	 * in this window and no other NMIs arrive after that, the event will
	 * not be reported. Address this by triggering an NMI on the new
	 * reporting CPU.
	 */
	apic_send_nmi_ipi(cpu_data);
}
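/* VTD_CAP_FRO encodes the fault recording register offset in 16-byte units. */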
static void *vtd_get_fault_rec_reg_addr(void *reg_base)
{
	return reg_base + 16 *
		mmio_read64_field(reg_base + VTD_CAP_REG, VTD_CAP_FRO_MASK);
}
static void vtd_print_fault_record_reg_status(unsigned int unit_no,
					      void *reg_base)
{
	unsigned int sid = mmio_read64_field(reg_base + VTD_FRCD_HI_REG,
					     VTD_FRCD_HI_SID_MASK);
	unsigned int fr = mmio_read64_field(reg_base + VTD_FRCD_HI_REG,
					    VTD_FRCD_HI_FR_MASK);
	unsigned long fi = mmio_read64_field(reg_base + VTD_FRCD_LO_REG,
					     VTD_FRCD_LO_FI_MASK);
	unsigned int type = mmio_read64_field(reg_base + VTD_FRCD_HI_REG,
					      VTD_FRCD_HI_TYPE_MASK);

	printk("VT-d fault event reported by IOMMU %d:\n", unit_no);
	printk(" Source Identifier (bus:dev.func): %02x:%02x.%x\n",
	       PCI_BDF_PARAMS(sid));
	printk(" Fault Reason: 0x%x Fault Info: %lx Type %d\n", fr, fi, type);
}
void iommu_check_pending_faults(void)
{
	unsigned int fr_index;
	void *reg_base = dmar_reg_base;
	unsigned int n;
	void *fault_reg_addr, *rec_reg_addr;

	if (this_cpu_id() != fault_reporting_cpu_id)
		return;

	for (n = 0; n < dmar_units; n++, reg_base += DMAR_MMIO_SIZE)
		if (mmio_read32_field(reg_base + VTD_FSTS_REG, VTD_FSTS_PPF)) {
			fr_index = mmio_read32_field(reg_base + VTD_FSTS_REG,
						     VTD_FSTS_FRI_MASK);
			fault_reg_addr = vtd_get_fault_rec_reg_addr(reg_base);
			rec_reg_addr = fault_reg_addr + 16 * fr_index;
			vtd_print_fault_record_reg_status(n, rec_reg_addr);

			/* Clear faults in record registers */
			mmio_write64_field(rec_reg_addr + VTD_FRCD_HI_REG,
					   VTD_FRCD_HI_F, VTD_FRCD_HI_F_CLEAR);
		}
}
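/*
 * Replay an interrupt entry cache invalidation of the root cell: re-read
 * the root cell's IRTE behind the given index and translate it into an
 * update of the corresponding entry in the real remapping table.
 */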
static int vtd_emulate_inv_int(unsigned int unit_no, unsigned int index)
{
	struct vtd_irte_usage *irte_usage;
	struct apic_irq_message irq_msg;
	struct pci_device *device;

	if (index >= root_cell_units[unit_no].irt_entries)
		return 0;
	irte_usage = &root_cell_units[unit_no].irte_map[index];
	if (!irte_usage->used)
		return 0;

	device = pci_get_assigned_device(&root_cell, irte_usage->device_id);
	if (device && device->info->type == JAILHOUSE_PCI_TYPE_IVSHMEM)
		return pci_ivshmem_update_msix(device);

	irq_msg = iommu_get_remapped_root_int(unit_no, irte_usage->device_id,
					      irte_usage->vector, index);
	return iommu_map_interrupt(&root_cell, irte_usage->device_id,
				   irte_usage->vector, irq_msg);
}
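/*
 * Emulate one queued invalidation request of the root cell. Only interrupt
 * entry invalidations and software-status invalidation waits are supported;
 * any other request type is rejected.
 */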
static int vtd_emulate_qi_request(unsigned int unit_no,
				  struct vtd_entry inv_desc)
{
	unsigned int start, count, n;
	void *status_page;
	int result;

	switch (inv_desc.lo_word & VTD_REQ_INV_MASK) {
	case VTD_REQ_INV_INT:
		if (inv_desc.lo_word & VTD_INV_INT_INDEX) {
			start = (inv_desc.lo_word & VTD_INV_INT_IIDX_MASK) >>
				VTD_INV_INT_IIDX_SHIFT;
			count =
			    1 << ((inv_desc.lo_word & VTD_INV_INT_IM_MASK) >>
				  VTD_INV_INT_IM_SHIFT);
		} else {
			start = 0;
			count = root_cell_units[unit_no].irt_entries;
		}
		for (n = start; n < start + count; n++) {
			result = vtd_emulate_inv_int(unit_no, n);
			if (result < 0)
				return result;
		}
		return 0;
	case VTD_REQ_INV_WAIT:
		if (inv_desc.lo_word & VTD_INV_WAIT_IF ||
		    !(inv_desc.lo_word & VTD_INV_WAIT_SW))
			return -EINVAL;

		status_page = paging_get_guest_pages(NULL, inv_desc.hi_word, 1,
						     PAGE_DEFAULT_FLAGS);
		if (!status_page)
			return -EINVAL;

		*(u32 *)(status_page + (inv_desc.hi_word & ~PAGE_MASK)) =
			inv_desc.lo_word >> 32;
		return 0;
	}
	return -EINVAL;
}
static enum mmio_result vtd_unit_access_handler(void *arg,
						struct mmio_access *mmio)
{
	struct vtd_emulation *unit = arg;
	unsigned int unit_no = unit - root_cell_units;
	struct vtd_entry inv_desc;
	void *inv_desc_page;

	if (mmio->address == VTD_FSTS_REG && !mmio->is_write) {
		/*
		 * Nothing to report this way, iommu_check_pending_faults
		 * takes care of the whole system.
		 */
		mmio->value = 0;
		return MMIO_HANDLED;
	}
	if (mmio->address == VTD_IQT_REG && mmio->is_write) {
		while (unit->iqh != (mmio->value & ~PAGE_MASK)) {
			inv_desc_page =
				paging_get_guest_pages(NULL, unit->iqa, 1,
						       PAGE_READONLY_FLAGS);
			if (!inv_desc_page)
				goto invalid_iq_entry;

			inv_desc =
			    *(struct vtd_entry *)(inv_desc_page + unit->iqh);

			if (vtd_emulate_qi_request(unit_no, inv_desc) != 0)
				goto invalid_iq_entry;

			unit->iqh += 1 << VTD_IQH_QH_SHIFT;
			unit->iqh &= ~PAGE_MASK;
		}
		return MMIO_HANDLED;
	}
	panic_printk("FATAL: Unhandled DMAR unit %s access, register %02x\n",
		     mmio->is_write ? "write" : "read", mmio->address);
	return MMIO_ERROR;

invalid_iq_entry:
	panic_printk("FATAL: Invalid/unsupported invalidation queue entry\n");
	return MMIO_ERROR;
}
static void vtd_init_unit(void *reg_base, void *inv_queue)
{
	void *fault_reg_base;
	unsigned int nfr, n;

	/* Disable QI and IR in case they were already on */
	vtd_update_gcmd_reg(reg_base, VTD_GCMD_QIE, 0);
	vtd_update_gcmd_reg(reg_base, VTD_GCMD_IRE, 0);

	nfr = mmio_read64_field(reg_base + VTD_CAP_REG, VTD_CAP_NFR_MASK);
	fault_reg_base = vtd_get_fault_rec_reg_addr(reg_base);

	for (n = 0; n < nfr; n++)
		/* Clear fault recording register status */
		mmio_write64_field(fault_reg_base + 16 * n + VTD_FRCD_HI_REG,
				   VTD_FRCD_HI_F, VTD_FRCD_HI_F_CLEAR);

	/* Clear fault overflow status (write-1-to-clear) */
	mmio_write32_field(reg_base + VTD_FSTS_REG, VTD_FSTS_PFO,
			   1);

	/* Set root entry table pointer */
	mmio_write64(reg_base + VTD_RTADDR_REG,
		     paging_hvirt2phys(root_entry_table));
	vtd_update_gcmd_reg(reg_base, VTD_GCMD_SRTP, 1);

	/* Set interrupt remapping table pointer */
	mmio_write64(reg_base + VTD_IRTA_REG,
		     paging_hvirt2phys(int_remap_table) |
		     (using_x2apic ? VTD_IRTA_EIME : 0) |
		     (int_remap_table_size_log2 - 1));
	vtd_update_gcmd_reg(reg_base, VTD_GCMD_SIRTP, 1);

	/* Set up and activate invalidation queue */
	mmio_write64(reg_base + VTD_IQT_REG, 0);
	mmio_write64(reg_base + VTD_IQA_REG, paging_hvirt2phys(inv_queue));
	vtd_update_gcmd_reg(reg_base, VTD_GCMD_QIE, 1);

	vtd_submit_iq_request(reg_base, inv_queue, &inv_global_context);
	vtd_submit_iq_request(reg_base, inv_queue, &inv_global_iotlb);
	vtd_submit_iq_request(reg_base, inv_queue, &inv_global_int);

	vtd_update_gcmd_reg(reg_base, VTD_GCMD_TE, 1);
	vtd_update_gcmd_reg(reg_base, VTD_GCMD_IRE, 1);
}
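/*
 * The root cell enters Jailhouse with interrupt remapping and queued
 * invalidation already enabled by Linux. Save its settings and register an
 * MMIO handler so that further root cell accesses to the unit are emulated.
 */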
static int vtd_init_ir_emulation(unsigned int unit_no, void *reg_base)
{
	struct vtd_emulation *unit = &root_cell_units[unit_no];
	unsigned long base, size;
	u64 iqt;

	root_cell.arch.vtd.ir_emulation = true;

	base = system_config->platform_info.x86.iommu_units[unit_no].base;
	mmio_region_register(&root_cell, base, PAGE_SIZE,
			     vtd_unit_access_handler, unit);

	unit->irta = mmio_read64(reg_base + VTD_IRTA_REG);
	unit->irt_entries = 2 << (unit->irta & VTD_IRTA_SIZE_MASK);

	size = PAGE_ALIGN(sizeof(struct vtd_irte_usage) * unit->irt_entries);
	unit->irte_map = page_alloc(&mem_pool, size / PAGE_SIZE);
	if (!unit->irte_map)
		return -ENOMEM;

	/* Wait until the hardware drained its queue, then take over the
	 * head position. */
	iqt = mmio_read64(reg_base + VTD_IQT_REG);
	while (mmio_read64(reg_base + VTD_IQH_REG) != iqt)
		cpu_relax();
	unit->iqh = iqt;

	unit->iqa = mmio_read64(reg_base + VTD_IQA_REG);
	if (unit->iqa & ~VTD_IQA_ADDR_MASK)
		return trace_error(-EIO);

	unit->fectl = mmio_read32(reg_base + VTD_FECTL_REG);
	unit->fedata = mmio_read32(reg_base + VTD_FEDATA_REG);
	unit->feaddr = mmio_read32(reg_base + VTD_FEADDR_REG);
	unit->feuaddr = mmio_read32(reg_base + VTD_FEUADDR_REG);

	return 0;
}
int iommu_init(void)
{
	unsigned long version, caps, ecaps, ctrls, sllps_caps = ~0UL;
	unsigned int units, pt_levels, num_did, n;
	struct jailhouse_iommu *unit;
	void *reg_base;
	int err;

	/* n = roundup(log2(system_config->interrupt_limit)) */
	for (n = 0; (1UL << n) < (system_config->interrupt_limit); n++)
		/* Empty loop */;
	if (n >= 16)
		return trace_error(-EINVAL);

	int_remap_table =
		page_alloc(&mem_pool, PAGES(sizeof(union vtd_irte) << n));
	if (!int_remap_table)
		return -ENOMEM;

	int_remap_table_size_log2 = n;

	units = iommu_count_units();
	if (units == 0)
		return trace_error(-EINVAL);

	dmar_reg_base = page_alloc(&remap_pool, units * PAGES(DMAR_MMIO_SIZE));
	if (!dmar_reg_base)
		return trace_error(-ENOMEM);

	unit_inv_queue = page_alloc(&mem_pool, units);
	if (!unit_inv_queue)
		return -ENOMEM;

	for (n = 0; n < units; n++) {
		unit = &system_config->platform_info.x86.iommu_units[n];

		reg_base = dmar_reg_base + n * DMAR_MMIO_SIZE;

		err = paging_create(&hv_paging_structs, unit->base, unit->size,
				    (unsigned long)reg_base,
				    PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE,
				    PAGING_NON_COHERENT);
		if (err)
			return err;

		version = mmio_read64(reg_base + VTD_VER_REG) & VTD_VER_MASK;
		if (version < VTD_VER_MIN || version == 0xff) {
			/* Tolerate missing units, e.g. when running under
			 * QEMU without VT-d emulation. */
			printk("WARNING: No VT-d support found!\n");
			return 0;
		}

		printk("DMAR unit @0x%lx/0x%x\n", unit->base, unit->size);

		caps = mmio_read64(reg_base + VTD_CAP_REG);
		if (caps & VTD_CAP_SAGAW39)
			pt_levels = 3;
		else if (caps & VTD_CAP_SAGAW48)
			pt_levels = 4;
		else
			return trace_error(-EIO);
		sllps_caps &= caps;

		if (dmar_pt_levels > 0 && dmar_pt_levels != pt_levels)
			return trace_error(-EIO);
		dmar_pt_levels = pt_levels;

		if (caps & VTD_CAP_CM)
			return trace_error(-EIO);

		ecaps = mmio_read64(reg_base + VTD_ECAP_REG);
		if (!(ecaps & VTD_ECAP_QI) || !(ecaps & VTD_ECAP_IR) ||
		    (using_x2apic && !(ecaps & VTD_ECAP_EIM)))
			return trace_error(-EIO);

		ctrls = mmio_read32(reg_base + VTD_GSTS_REG) &
			VTD_GSTS_USED_CTRLS;
		if (ctrls != 0) {
			if (ctrls != (VTD_GSTS_IRES | VTD_GSTS_QIES))
				return trace_error(-EBUSY);
			err = vtd_init_ir_emulation(n, reg_base);
			if (err)
				return err;
		} else if (root_cell.arch.vtd.ir_emulation) {
			/* IR+QI must be either on or off in all units */
			return trace_error(-EIO);
		}

		num_did = 1 << (4 + (caps & VTD_CAP_NUM_DID_MASK) * 2);
		if (num_did < dmar_num_did)
			dmar_num_did = num_did;
	}

	dmar_units = units;

	/*
	 * Derive vtd_paging from the very similar x86_64_paging,
	 * replicating 0..3 for 4 levels and 1..3 for 3 levels.
	 */
	memcpy(vtd_paging, &x86_64_paging[4 - dmar_pt_levels],
	       sizeof(struct paging) * dmar_pt_levels);
	for (n = 0; n < dmar_pt_levels; n++)
		vtd_paging[n].set_next_pt = vtd_set_next_pt;
	if (!(sllps_caps & VTD_CAP_SLLPS1G))
		vtd_paging[dmar_pt_levels - 3].page_size = 0;
	if (!(sllps_caps & VTD_CAP_SLLPS2M))
		vtd_paging[dmar_pt_levels - 2].page_size = 0;

	return iommu_cell_init(&root_cell);
}
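/*
 * Update one entry of the hypervisor's remapping table and invalidate the
 * interrupt entry cache of all units.
 */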
static void vtd_update_irte(unsigned int index, union vtd_irte content)
{
	const struct vtd_entry inv_int = {
		.lo_word = VTD_REQ_INV_INT | VTD_INV_INT_INDEX |
			((u64)index << VTD_INV_INT_IIDX_SHIFT),
	};
	union vtd_irte *irte = &int_remap_table[index];
	void *inv_queue = unit_inv_queue;
	void *reg_base = dmar_reg_base;
	unsigned int n;

	if (content.field.p) {
		/*
		 * Write upper half first to preserve non-presence.
		 * If the entry was present before, we are only modifying the
		 * lower half's content (destination etc.), so writing the
		 * upper half becomes a nop and is safely done first.
		 */
		irte->raw[1] = content.raw[1];
		memory_barrier();
		irte->raw[0] = content.raw[0];
	} else {
		/*
		 * Write only the lower half - we are clearing presence and
		 * assignment.
		 */
		irte->raw[0] = content.raw[0];
	}
	arch_paging_flush_cpu_caches(irte, sizeof(*irte));

	for (n = 0; n < dmar_units; n++) {
		vtd_submit_iq_request(reg_base, inv_queue, &inv_int);
		reg_base += DMAR_MMIO_SIZE;
		inv_queue += PAGE_SIZE;
	}
}
static int vtd_find_int_remap_region(u16 device_id)
{
	int n;

	/* interrupt_limit is < 2^16, see iommu_init */
	for (n = 0; n < system_config->interrupt_limit; n++)
		if (int_remap_table[n].field.assigned &&
		    int_remap_table[n].field.sid == device_id)
			return n;

	return -ENOENT;
}
static int vtd_reserve_int_remap_region(u16 device_id, unsigned int length)
{
	int n, start = -E2BIG;

	if (length == 0 || vtd_find_int_remap_region(device_id) >= 0)
		return 0;

	for (n = 0; n < system_config->interrupt_limit; n++) {
		if (int_remap_table[n].field.assigned) {
			start = -E2BIG;
			continue;
		}
		if (start < 0)
			start = n;
		if (n + 1 == start + length) {
			printk("Reserving %u interrupt(s) for device %04x "
			       "at index %d\n", length, device_id, start);
			for (n = start; n < start + length; n++) {
				int_remap_table[n].field.assigned = 1;
				int_remap_table[n].field.sid = device_id;
			}
			return start;
		}
	}

	return trace_error(-E2BIG);
}
static void vtd_free_int_remap_region(u16 device_id, unsigned int length)
{
	union vtd_irte free_irte = { .field.p = 0, .field.assigned = 0 };
	int pos = vtd_find_int_remap_region(device_id);

	if (pos >= 0) {
		printk("Freeing %u interrupt(s) for device %04x at index %d\n",
		       length, device_id, pos);
		while (length-- > 0)
			vtd_update_irte(pos++, free_irte);
	}
}
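/*
 * Attach a device to its cell: reserve remapping entries for all MSI/MSI-X
 * vectors and install a context entry that points to the cell's VT-d page
 * tables, tagged with the cell ID as domain ID.
 */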
int iommu_add_pci_device(struct cell *cell, struct pci_device *device)
{
	unsigned int max_vectors = MAX(device->info->num_msi_vectors,
				       device->info->num_msix_vectors);
	u16 bdf = device->info->bdf;
	u64 *root_entry_lo = &root_entry_table[PCI_BUS(bdf)].lo_word;
	struct vtd_entry *context_entry_table, *context_entry;
	int result;

	result = vtd_reserve_int_remap_region(bdf, max_vectors);
	if (result < 0)
		return result;

	if (*root_entry_lo & VTD_ROOT_PRESENT) {
		context_entry_table =
			paging_phys2hvirt(*root_entry_lo & PAGE_MASK);
	} else {
		context_entry_table = page_alloc(&mem_pool, 1);
		if (!context_entry_table)
			goto error_nomem;
		*root_entry_lo = VTD_ROOT_PRESENT |
			paging_hvirt2phys(context_entry_table);
		arch_paging_flush_cpu_caches(root_entry_lo, sizeof(u64));
	}

	context_entry = &context_entry_table[PCI_DEVFN(bdf)];
	context_entry->lo_word = VTD_CTX_PRESENT | VTD_CTX_TTYPE_MLP_UNTRANS |
		paging_hvirt2phys(cell->arch.vtd.pg_structs.root_table);
	context_entry->hi_word =
		(dmar_pt_levels == 3 ? VTD_CTX_AGAW_39 : VTD_CTX_AGAW_48) |
		(cell->id << VTD_CTX_DID_SHIFT);
	arch_paging_flush_cpu_caches(context_entry, sizeof(*context_entry));

	return 0;

error_nomem:
	vtd_free_int_remap_region(bdf, max_vectors);
	return -ENOMEM;
}
void iommu_remove_pci_device(struct pci_device *device)
{
	u16 bdf = device->info->bdf;
	u64 *root_entry_lo = &root_entry_table[PCI_BUS(bdf)].lo_word;
	struct vtd_entry *context_entry_table;
	struct vtd_entry *context_entry;
	unsigned int n;

	vtd_free_int_remap_region(bdf, MAX(device->info->num_msi_vectors,
					   device->info->num_msix_vectors));

	context_entry_table = paging_phys2hvirt(*root_entry_lo & PAGE_MASK);
	context_entry = &context_entry_table[PCI_DEVFN(bdf)];

	context_entry->lo_word &= ~VTD_CTX_PRESENT;
	arch_paging_flush_cpu_caches(&context_entry->lo_word, sizeof(u64));

	/* Free the context entry table once its last entry is gone. */
	for (n = 0; n < 256; n++)
		if (context_entry_table[n].lo_word & VTD_CTX_PRESENT)
			return;

	*root_entry_lo &= ~VTD_ROOT_PRESENT;
	arch_paging_flush_cpu_caches(root_entry_lo, sizeof(u64));
	page_free(&mem_pool, context_entry_table, 1);
}
int iommu_cell_init(struct cell *cell)
{
	const struct jailhouse_irqchip *irqchip =
		jailhouse_cell_irqchips(cell->config);
	unsigned int n;
	int result;

	if (cell->id >= dmar_num_did)
		return trace_error(-ERANGE);

	cell->arch.vtd.pg_structs.root_paging = vtd_paging;
	cell->arch.vtd.pg_structs.root_table = page_alloc(&mem_pool, 1);
	if (!cell->arch.vtd.pg_structs.root_table)
		return -ENOMEM;

	/* reserve regions for IRQ chips (if not done already) */
	for (n = 0; n < cell->config->num_irqchips; n++, irqchip++) {
		result = vtd_reserve_int_remap_region(irqchip->id,
						      IOAPIC_NUM_PINS);
		if (result < 0) {
			iommu_cell_exit(cell);
			return result;
		}
	}

	return 0;
}
int iommu_map_memory_region(struct cell *cell,
			    const struct jailhouse_memory *mem)
{
	u32 flags = 0;

	if (!(mem->flags & JAILHOUSE_MEM_DMA))
		return 0;

	if (mem->virt_start & BIT_MASK(63, 12 + 9 * dmar_pt_levels))
		return trace_error(-E2BIG);

	if (mem->flags & JAILHOUSE_MEM_READ)
		flags |= VTD_PAGE_READ;
	if (mem->flags & JAILHOUSE_MEM_WRITE)
		flags |= VTD_PAGE_WRITE;

	return paging_create(&cell->arch.vtd.pg_structs, mem->phys_start,
			     mem->size, mem->virt_start, flags,
			     PAGING_COHERENT);
}
int iommu_unmap_memory_region(struct cell *cell,
			      const struct jailhouse_memory *mem)
{
	if (!(mem->flags & JAILHOUSE_MEM_DMA))
		return 0;

	return paging_destroy(&cell->arch.vtd.pg_structs, mem->virt_start,
			      mem->size, PAGING_COHERENT);
}
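/*
 * Read the root cell's IRTE for the given remap index, convert it into an
 * APIC IRQ message and record the device/vector association in irte_map so
 * that later invalidations can be replayed.
 */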
struct apic_irq_message
iommu_get_remapped_root_int(unsigned int iommu, u16 device_id,
			    unsigned int vector, unsigned int remap_index)
{
	struct vtd_emulation *unit = &root_cell_units[iommu];
	struct apic_irq_message irq_msg = { .valid = 0 };
	union vtd_irte root_irte;
	unsigned long irte_addr;
	void *irte_page;

	if (remap_index >= unit->irt_entries)
		return irq_msg;
	unit->irte_map[remap_index].used = 0;

	irte_addr = (unit->irta & VTD_IRTA_ADDR_MASK) +
		remap_index * sizeof(union vtd_irte);
	irte_page = paging_get_guest_pages(NULL, irte_addr, 1,
					   PAGE_READONLY_FLAGS);
	if (!irte_page)
		return irq_msg;

	root_irte = *(union vtd_irte *)(irte_page + (irte_addr & ~PAGE_MASK));

	irq_msg.valid =
		(root_irte.field.p && root_irte.field.sid == device_id);
	irq_msg.vector = root_irte.field.vector;
	irq_msg.delivery_mode = root_irte.field.delivery_mode;
	irq_msg.dest_logical = root_irte.field.dest_logical;
	irq_msg.level_triggered = root_irte.field.level_triggered;
	irq_msg.redir_hint = root_irte.field.redir_hint;
	irq_msg.destination = root_irte.field.destination;
	if (!using_x2apic)
		/* xAPIC in flat mode: APIC ID in 47:40 (of 63:32) */
		irq_msg.destination >>= 8;

	unit->irte_map[remap_index].device_id = device_id;
	unit->irte_map[remap_index].vector = vector;
	unit->irte_map[remap_index].used = 1;

	return irq_msg;
}
int iommu_map_interrupt(struct cell *cell, u16 device_id, unsigned int vector,
			struct apic_irq_message irq_msg)
{
	union vtd_irte irte;
	int base_index;

	base_index = vtd_find_int_remap_region(device_id);
	if (base_index < 0)
		return base_index;

	if (vector >= system_config->interrupt_limit ||
	    base_index >= system_config->interrupt_limit - vector)
		return -ERANGE;

	irte = int_remap_table[base_index + vector];
	if (!irte.field.assigned || irte.field.sid != device_id)
		return -ERANGE;

	irte.field.p = irq_msg.valid;
	if (!irte.field.p)
		/*
		 * Do not validate non-present entries, they may contain
		 * invalid data and cause false-positives.
		 */
		goto update_irte;

	/*
	 * Validate delivery mode and destination(s).
	 * Note that we support the redirection hint only in logical
	 * destination mode.
	 */
	if ((irq_msg.delivery_mode != APIC_MSG_DLVR_FIXED &&
	     irq_msg.delivery_mode != APIC_MSG_DLVR_LOWPRI) ||
	    irq_msg.dest_logical != irq_msg.redir_hint)
		return -EINVAL;
	if (!apic_filter_irq_dest(cell, &irq_msg))
		return -EPERM;

	irte.field.dest_logical = irq_msg.dest_logical;
	irte.field.redir_hint = irq_msg.redir_hint;
	irte.field.level_triggered = irq_msg.level_triggered;
	irte.field.delivery_mode = irq_msg.delivery_mode;
	irte.field.vector = irq_msg.vector;
	irte.field.destination = irq_msg.destination;
	if (!using_x2apic)
		/* xAPIC in flat mode: APIC ID in 47:40 (of 63:32) */
		irte.field.destination <<= 8;
	irte.field.sq = VTD_IRTE_SQ_VERIFY_FULL_SID;
	irte.field.svt = VTD_IRTE_SVT_VERIFY_SID_SQ;

update_irte:
	vtd_update_irte(base_index + vector, irte);

	return base_index + vector;
}
void iommu_cell_exit(struct cell *cell)
{
	page_free(&mem_pool, cell->arch.vtd.pg_structs.root_table, 1);

	/*
	 * Note that reservation regions of IOAPICs won't be released because
	 * they might be shared with other cells.
	 */
}
void iommu_config_commit(struct cell *cell_added_removed)
{
	void *inv_queue = unit_inv_queue;
	void *reg_base = dmar_reg_base;
	unsigned int n;

	if (cell_added_removed)
		vtd_init_fault_nmi();

	if (cell_added_removed == &root_cell) {
		for (n = 0; n < dmar_units; n++) {
			vtd_init_unit(reg_base, inv_queue);
			reg_base += DMAR_MMIO_SIZE;
			inv_queue += PAGE_SIZE;
		}
		dmar_units_initialized = true;
	} else {
		if (cell_added_removed)
			vtd_flush_domain_caches(cell_added_removed->id);
		vtd_flush_domain_caches(root_cell.id);
	}
}
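/*
 * Hand a unit back to Linux on shutdown: restore the root cell's interrupt
 * remapping table, invalidation queue and fault event settings that were
 * saved in vtd_init_ir_emulation.
 */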
static void vtd_restore_ir(unsigned int unit_no, void *reg_base)
{
	struct vtd_emulation *unit = &root_cell_units[unit_no];
	void *inv_queue = unit_inv_queue + unit_no * PAGE_SIZE;
	void *root_inv_queue;
	u64 iqh;

	mmio_write64(reg_base + VTD_IRTA_REG, unit->irta);
	vtd_update_gcmd_reg(reg_base, VTD_GCMD_SIRTP, 1);
	vtd_submit_iq_request(reg_base, inv_queue, &inv_global_int);

	vtd_update_gcmd_reg(reg_base, VTD_GCMD_QIE, 0);
	mmio_write64(reg_base + VTD_IQT_REG, 0);
	mmio_write64(reg_base + VTD_IQA_REG, unit->iqa);
	vtd_update_gcmd_reg(reg_base, VTD_GCMD_QIE, 1);

	/*
	 * Restore invalidation queue head pointer by issuing dummy requests
	 * until the hardware is in sync with the Linux state again.
	 */
	iqh = unit->iqh;
	root_inv_queue = paging_get_guest_pages(NULL, unit->iqa, 1,
						PAGE_DEFAULT_FLAGS);
	if (root_inv_queue)
		while (mmio_read64(reg_base + VTD_IQH_REG) != iqh)
			vtd_submit_iq_request(reg_base, root_inv_queue, NULL);
	else
		printk("WARNING: Failed to restore invalidation queue head\n");

	vtd_update_gcmd_reg(reg_base, VTD_GCMD_IRE, 1);

	mmio_write32(reg_base + VTD_FEDATA_REG, unit->fedata);
	mmio_write32(reg_base + VTD_FEADDR_REG, unit->feaddr);
	mmio_write32(reg_base + VTD_FEUADDR_REG, unit->feuaddr);
	mmio_write32(reg_base + VTD_FECTL_REG, unit->fectl);
}
void iommu_shutdown(void)
{
	void *reg_base = dmar_reg_base;
	unsigned int n;

	if (dmar_units_initialized)
		for (n = 0; n < dmar_units; n++, reg_base += DMAR_MMIO_SIZE) {
			vtd_update_gcmd_reg(reg_base, VTD_GCMD_TE, 0);
			vtd_update_gcmd_reg(reg_base, VTD_GCMD_IRE, 0);
			if (root_cell.arch.vtd.ir_emulation)
				vtd_restore_ir(n, reg_base);
			else
				vtd_update_gcmd_reg(reg_base, VTD_GCMD_QIE, 0);
		}
}
bool iommu_cell_emulates_ir(struct cell *cell)
{
	return cell->arch.vtd.ir_emulation;
}