#include <asm/apic.h>
#include <asm/control.h>
#include <asm/ioapic.h>
-#include <asm/vmx.h>
-#include <asm/vtd.h>
+#include <asm/iommu.h>
+#include <asm/vcpu.h>
struct exception_frame {
u64 vector;
{
int err;
- err = vmx_cell_init(cell);
+ err = vcpu_cell_init(cell);
if (err)
return err;
- err = vtd_cell_init(cell);
+ err = iommu_cell_init(cell);
if (err)
- goto error_vmx_exit;
+ goto error_vm_exit;
err = pci_cell_init(cell);
if (err)
- goto error_vtd_exit;
+ goto error_iommu_exit;
ioapic_cell_init(cell);
return 0;
-error_vtd_exit:
- vtd_cell_exit(cell);
-error_vmx_exit:
- vmx_cell_exit(cell);
+error_iommu_exit:
+ iommu_cell_exit(cell);
+error_vm_exit:
+ vcpu_cell_exit(cell);
return err;
}
{
int err;
- err = vmx_map_memory_region(cell, mem);
+ err = vcpu_map_memory_region(cell, mem);
if (err)
return err;
- err = vtd_map_memory_region(cell, mem);
+ err = iommu_map_memory_region(cell, mem);
if (err)
- vmx_unmap_memory_region(cell, mem);
+ vcpu_unmap_memory_region(cell, mem);
return err;
}
{
int err;
- err = vtd_unmap_memory_region(cell, mem);
+ err = iommu_unmap_memory_region(cell, mem);
if (err)
return err;
- return vmx_unmap_memory_region(cell, mem);
+ return vcpu_unmap_memory_region(cell, mem);
}
void arch_cell_destroy(struct cell *cell)
{
ioapic_cell_exit(cell);
pci_cell_exit(cell);
- vtd_cell_exit(cell);
- vmx_cell_exit(cell);
+ iommu_cell_exit(cell);
+ vcpu_cell_exit(cell);
}
/* all root cell CPUs (except the calling one) have to be suspended */
current_cpu)
per_cpu(cpu)->flush_virt_caches = true;
- vmx_invept();
+ vcpu_tlb_flush();
- vtd_config_commit(cell_added_removed);
+ iommu_config_commit(cell_added_removed);
pci_config_commit(cell_added_removed);
ioapic_config_commit(cell_added_removed);
}
pci_prepare_handover();
ioapic_prepare_handover();
- vtd_shutdown();
+ iommu_shutdown();
pci_shutdown();
ioapic_shutdown();
}
if (cpu_data->shutdown_cpu) {
apic_clear(cpu_data);
- vmx_cpu_exit(cpu_data);
+ vcpu_exit(cpu_data);
asm volatile("1: hlt; jmp 1b");
}
if (cpu_data->flush_virt_caches) {
cpu_data->flush_virt_caches = false;
- vmx_invept();
+ vcpu_tlb_flush();
}
spin_unlock(&cpu_data->control_lock);
/* wait_for_sipi is only modified on this CPU, so checking outside of
* control_lock is fine */
if (cpu_data->wait_for_sipi)
- vmx_cpu_park(cpu_data);
+ vcpu_park(cpu_data);
else if (sipi_vector >= 0)
apic_clear(cpu_data);
x86_enter_wait_for_sipi(cpu_data);
spin_unlock(&cpu_data->control_lock);
- vmx_cpu_park(cpu_data);
+ vcpu_park(cpu_data);
}
mov %rsp,%rdi
lea -PERCPU_STACK_END+16*8(%rsp),%rsi
- call vmx_handle_exit
+ call vcpu_handle_exit
pop %r15
pop %r14
vmresume
lea -PERCPU_STACK_END(%rsp),%rdi
- jmp vmx_entry_failure
+ jmp vcpu_entry_failure
--- /dev/null
+/*
+ * Jailhouse, a Linux-based partitioning hypervisor
+ *
+ * Copyright (c) Valentine Sinitsyn, 2014
+ *
+ * Authors:
+ * Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+/*
+ * Vendor-neutral IOMMU interface. On Intel this is backed by the VT-d
+ * implementation (vtd.c); the iommu_* names allow other backends (e.g.
+ * AMD-Vi) to be plugged in without touching the callers.
+ */
+
+#ifndef _JAILHOUSE_ASM_IOMMU_H
+#define _JAILHOUSE_ASM_IOMMU_H
+
+#include <jailhouse/entry.h>
+#include <jailhouse/cell-config.h>
+#include <jailhouse/pci.h>
+#include <jailhouse/types.h>
+#include <asm/apic.h>
+#include <asm/cell.h>
+#include <asm/percpu.h>
+
+/* One-time hardware initialization; also initializes the root cell's
+ * IOMMU state before returning. Called from arch_init_late(). */
+int iommu_init(void);
+
+/* Per-cell DMA translation setup; paired with iommu_cell_exit(). */
+int iommu_cell_init(struct cell *cell);
+/* Map/unmap one guest memory region in the cell's DMA page tables.
+ * Returns 0 on success, negative error code otherwise. */
+int iommu_map_memory_region(struct cell *cell,
+ const struct jailhouse_memory *mem);
+int iommu_unmap_memory_region(struct cell *cell,
+ const struct jailhouse_memory *mem);
+/* Assign a PCI device to / remove it from a cell's translation domain. */
+int iommu_add_pci_device(struct cell *cell, struct pci_device *device);
+void iommu_remove_pci_device(struct pci_device *device);
+
+/* Look up the interrupt message the root cell programmed at the given
+ * remapping index of the specified IOMMU unit. */
+struct apic_irq_message iommu_get_remapped_root_int(unsigned int iommu,
+ u16 device_id,
+ unsigned int vector,
+ unsigned int remap_index);
+/* Install an interrupt remapping entry for (device_id, vector).
+ * NOTE(review): callers treat -ENOSYS as "no remapping hardware"
+ * (see the QEMU hacks in ioapic.c/pci.c) — confirm for new backends. */
+int iommu_map_interrupt(struct cell *cell,
+ u16 device_id,
+ unsigned int vector,
+ struct apic_irq_message irq_msg);
+
+void iommu_cell_exit(struct cell *cell);
+
+/* Flush/commit queued translation changes after a cell was added or
+ * removed; invoked from the arch config-commit path. */
+void iommu_config_commit(struct cell *cell_added_removed);
+
+/* Disable the IOMMU on hypervisor shutdown/handover. */
+void iommu_shutdown(void);
+
+/* Report pending translation faults; called from the VM-exit path. */
+void iommu_check_pending_faults(struct per_cpu *cpu_data);
+
+/* MMIO handler for emulated IOMMU registers. Returns 1 if the access
+ * was handled, 0 if the address is not an IOMMU register (callers then
+ * try other handlers) — negative value semantics TODO confirm. */
+int iommu_mmio_access_handler(bool is_write, u64 addr, u32 *value);
+
+#endif
--- /dev/null
+/*
+ * Jailhouse, a Linux-based partitioning hypervisor
+ *
+ * Copyright (c) Valentine Sinitsyn, 2014
+ *
+ * Authors:
+ * Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+/*
+ * Vendor-neutral virtual CPU interface. Currently implemented by the
+ * Intel VMX backend (vmx.c); these names decouple the generic x86 code
+ * from the vendor-specific virtualization extension (VMX/SVM).
+ */
+
+#ifndef _JAILHOUSE_ASM_VCPU_H
+#define _JAILHOUSE_ASM_VCPU_H
+
+#include <jailhouse/entry.h>
+#include <jailhouse/cell-config.h>
+#include <asm/cell.h>
+#include <asm/percpu.h>
+#include <asm/processor.h>
+
+/* One-time vendor-specific initialization (feature detection, global
+ * state); also initializes the root cell's vCPU state. */
+int vcpu_vendor_init(void);
+
+/* Per-cell setup of guest physical memory translation (e.g. EPT) and
+ * I/O intercepts; paired with vcpu_cell_exit(). */
+int vcpu_cell_init(struct cell *cell);
+/* Map/unmap one region in the cell's guest-physical address space.
+ * Returns 0 on success, negative error code otherwise. */
+int vcpu_map_memory_region(struct cell *cell,
+ const struct jailhouse_memory *mem);
+int vcpu_unmap_memory_region(struct cell *cell,
+ const struct jailhouse_memory *mem);
+void vcpu_cell_exit(struct cell *cell);
+
+/* Enable virtualization on this CPU and set up its control structures;
+ * vcpu_exit() reverts the CPU to non-root operation. */
+int vcpu_init(struct per_cpu *cpu_data);
+void vcpu_exit(struct per_cpu *cpu_data);
+
+/* Enter guest mode for the first time; does not return. */
+void __attribute__((noreturn)) vcpu_activate_vmm(struct per_cpu *cpu_data);
+/* Top-level VM-exit dispatcher, called from the low-level entry code. */
+void vcpu_handle_exit(struct registers *guest_regs, struct per_cpu *cpu_data);
+
+/* Reset the vCPU and leave it halted (waiting for SIPI/shutdown). */
+void vcpu_park(struct per_cpu *cpu_data);
+
+/* NMI callback — presumably used to kick the CPU out of guest mode;
+ * no caller visible in this patch, TODO confirm. */
+void vcpu_nmi_handler(struct per_cpu *cpu_data);
+
+/* Flush cached guest-physical translations on this CPU (replaces the
+ * former vmx_invept()). */
+void vcpu_tlb_flush(void);
+
+/* Report a failed guest entry (e.g. vmresume error); called from the
+ * low-level entry code. */
+void vcpu_entry_failure(struct per_cpu *cpu_data);
+
+#endif
#ifndef _JAILHOUSE_ASM_VMX_H
#define _JAILHOUSE_ASM_VMX_H
-#include <asm/cell.h>
#include <asm/paging.h>
-#include <asm/processor.h>
-
-#include <jailhouse/cell-config.h>
struct per_cpu;
#define APIC_ACCESS_TYPE_LINEAR_READ 0x00000000
#define APIC_ACCESS_TYPE_LINEAR_WRITE 0x00001000
-int vmx_init(void);
-
-int vmx_cell_init(struct cell *cell);
-int vmx_map_memory_region(struct cell *cell,
- const struct jailhouse_memory *mem);
-int vmx_unmap_memory_region(struct cell *cell,
- const struct jailhouse_memory *mem);
-void vmx_cell_exit(struct cell *cell);
-
-int vmx_cpu_init(struct per_cpu *cpu_data);
-void vmx_cpu_exit(struct per_cpu *cpu_data);
-
-void __attribute__((noreturn)) vmx_cpu_activate_vmm(struct per_cpu *cpu_data);
-void vmx_handle_exit(struct registers *guest_regs, struct per_cpu *cpu_data);
-void vmx_entry_failure(struct per_cpu *cpu_data);
-
-void vmx_invept(void);
-
void vmx_schedule_vmexit(struct per_cpu *cpu_data);
-void vmx_cpu_park(struct per_cpu *cpu_data);
void vmx_vmexit(void);
* the COPYING file in the top-level directory.
*/
+#ifndef _JAILHOUSE_ASM_VTD_H
+#define _JAILHOUSE_ASM_VTD_H
+
#include <jailhouse/pci.h>
#include <jailhouse/utils.h>
#include <asm/apic.h>
#define VTD_IRTE_SQ_VERIFY_FULL_SID 0x0
#define VTD_IRTE_SVT_VERIFY_SID_SQ 0x1
-int vtd_init(void);
-
-int vtd_cell_init(struct cell *cell);
-int vtd_map_memory_region(struct cell *cell,
- const struct jailhouse_memory *mem);
-int vtd_unmap_memory_region(struct cell *cell,
- const struct jailhouse_memory *mem);
-int vtd_add_pci_device(struct cell *cell, struct pci_device *device);
-void vtd_remove_pci_device(struct pci_device *device);
-struct apic_irq_message
-vtd_get_remapped_root_int(unsigned int iommu, u16 device_id,
- unsigned int vector, unsigned int remap_index);
-int vtd_map_interrupt(struct cell *cell, u16 device_id, unsigned int vector,
- struct apic_irq_message irq_msg);
-void vtd_cell_exit(struct cell *cell);
-
-void vtd_config_commit(struct cell *cell_added_removed);
-
-void vtd_shutdown(void);
-
-void vtd_check_pending_faults(struct per_cpu *cpu_data);
-
-int vtd_mmio_access_handler(bool is_write, u64 addr, u32 *value);
+#endif
#include <jailhouse/printk.h>
#include <asm/apic.h>
#include <asm/ioapic.h>
+#include <asm/iommu.h>
#include <asm/spinlock.h>
-#include <asm/vtd.h>
#include <jailhouse/cell-config.h>
idx = entry.remap.int_index | (entry.remap.int_index15 << 15);
- return vtd_get_remapped_root_int(root_cell.ioapic_iommu,
- root_cell.ioapic_id, pin,
- idx);
+ return iommu_get_remapped_root_int(root_cell.ioapic_iommu,
+ root_cell.ioapic_id, pin,
+ idx);
}
irq_msg.vector = entry.native.vector;
irq_msg.delivery_mode = entry.native.delivery_mode;
irq_msg.level_triggered = entry.native.level_triggered;
irq_msg.dest_logical = entry.native.dest_logical;
- /* align redir_hint and dest_logical - required by vtd_map_interrupt */
+ /* align redir_hint and dest_logical - required by iommu_map_interrupt */
irq_msg.redir_hint = irq_msg.dest_logical;
irq_msg.valid = 1;
irq_msg.destination = entry.native.destination;
irq_msg = ioapic_translate_redir_entry(cell, pin, entry);
- result = vtd_map_interrupt(cell, cell->ioapic_id, pin, irq_msg);
+ result = iommu_map_interrupt(cell, cell->ioapic_id, pin, irq_msg);
// HACK for QEMU
if (result == -ENOSYS) {
ioapic_reg_write(reg, entry.raw[reg & 1]);
#include <jailhouse/utils.h>
#include <asm/apic.h>
#include <asm/io.h>
+#include <asm/iommu.h>
#include <asm/pci.h>
-#include <asm/vtd.h>
/** Protects the root bridge's PIO interface to the PCI config space. */
static DEFINE_SPINLOCK(pci_lock);
int arch_pci_add_device(struct cell *cell, struct pci_device *device)
{
- return vtd_add_pci_device(cell, device);
+ return iommu_add_pci_device(cell, device);
}
void arch_pci_remove_device(struct pci_device *device)
{
- vtd_remove_pci_device(device);
+ iommu_remove_pci_device(device);
}
static union x86_msi_vector pci_get_x86_msi_vector(struct pci_device *device)
idx = msi.remap.int_index | (msi.remap.int_index15 << 15);
if (msi.remap.shv)
idx += msi.remap.subhandle;
- return vtd_get_remapped_root_int(device->info->iommu,
- device->info->bdf,
- vector, idx);
+ return iommu_get_remapped_root_int(device->info->iommu,
+ device->info->bdf,
+ vector, idx);
}
irq_msg.vector = msi.native.vector;
for (n = 0; n < vectors; n++) {
irq_msg = pci_translate_msi_vector(device, n, vectors, msi);
- result = vtd_map_interrupt(device->cell, bdf, n, irq_msg);
+ result = iommu_map_interrupt(device->cell, bdf, n, irq_msg);
// HACK for QEMU
if (result == -ENOSYS) {
for (n = 1; n < (info->msi_64bits ? 4 : 3); n++)
return 0;
irq_msg = pci_translate_msi_vector(device, index, 0, msi);
- result = vtd_map_interrupt(device->cell, device->info->bdf, index,
+ result = iommu_map_interrupt(device->cell, device->info->bdf, index,
irq_msg);
// HACK for QEMU
if (result == -ENOSYS) {
#include <asm/apic.h>
#include <asm/bitops.h>
#include <asm/ioapic.h>
-#include <asm/vmx.h>
-#include <asm/vtd.h>
+#include <asm/iommu.h>
+#include <asm/vcpu.h>
#define IDT_PRESENT_INT 0x00008e00
for (vector = IRQ_DESC_START; vector < NUM_IDT_DESC; vector++)
set_idt_int_gate(vector, (unsigned long)irq_entry);
- err = vmx_init();
+ err = vcpu_vendor_init();
if (err)
return err;
return 0;
}
+/*
+ * TODO: Current struct segment is VMX-specific (with 32-bit access rights).
+ * We need a generic struct segment for x86 that is converted to VMX/SVM one
+ * in the vmx.c/svm.c.
+ */
static void read_descriptor(struct per_cpu *cpu_data, struct segment *seg)
{
u64 *desc = (u64 *)(cpu_data->linux_gdtr.base +
if (err)
goto error_out;
- err = vmx_cpu_init(cpu_data);
+ err = vcpu_init(cpu_data);
if (err)
goto error_out;
{
int err;
- err = vtd_init();
+ err = iommu_init();
if (err)
return err;
void arch_cpu_activate_vmm(struct per_cpu *cpu_data)
{
- vmx_cpu_activate_vmm(cpu_data);
+ vcpu_activate_vmm(cpu_data);
}
void arch_cpu_restore(struct per_cpu *cpu_data)
if (!cpu_data->initialized)
return;
- vmx_cpu_exit(cpu_data);
+ vcpu_exit(cpu_data);
write_msr(MSR_EFER, cpu_data->linux_efer);
write_cr3(cpu_data->linux_cr3);
#include <asm/i8042.h>
#include <asm/io.h>
#include <asm/ioapic.h>
+#include <asm/iommu.h>
#include <asm/pci.h>
+#include <asm/vcpu.h>
#include <asm/vmx.h>
#include <asm/vtd.h>
EPT_FLAG_WRITE | EPT_FLAG_EXECUTE;
}
-int vmx_init(void)
+int vcpu_vendor_init(void)
{
unsigned int n;
int err;
msr_bitmap[VMX_MSR_BMP_0000_WRITE][MSR_X2APIC_ICR/8] = 0x01;
}
- return vmx_cell_init(&root_cell);
+ return vcpu_cell_init(&root_cell);
}
unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
flags);
}
-int vmx_cell_init(struct cell *cell)
+int vcpu_cell_init(struct cell *cell)
{
const u8 *pio_bitmap = jailhouse_cell_pio_bitmap(cell->config);
u32 pio_bitmap_size = cell->config->pio_bitmap_size;
EPT_FLAG_READ | EPT_FLAG_WRITE|EPT_FLAG_WB_TYPE,
PAGING_NON_COHERENT);
if (err) {
- vmx_cell_exit(cell);
+ vcpu_cell_exit(cell);
return err;
}
return 0;
}
-int vmx_map_memory_region(struct cell *cell,
+int vcpu_map_memory_region(struct cell *cell,
const struct jailhouse_memory *mem)
{
u64 phys_start = mem->phys_start;
mem->virt_start, flags, PAGING_NON_COHERENT);
}
-int vmx_unmap_memory_region(struct cell *cell,
+int vcpu_unmap_memory_region(struct cell *cell,
const struct jailhouse_memory *mem)
{
return paging_destroy(&cell->vmx.ept_structs, mem->virt_start,
mem->size, PAGING_NON_COHERENT);
}
-void vmx_cell_exit(struct cell *cell)
+void vcpu_cell_exit(struct cell *cell)
{
const u8 *root_pio_bitmap =
jailhouse_cell_pio_bitmap(root_cell.config);
page_free(&mem_pool, cell->vmx.ept_structs.root_table, 1);
}
-void vmx_invept(void)
+void vcpu_tlb_flush(void)
{
unsigned long ept_cap = read_msr(MSR_IA32_VMX_EPT_VPID_CAP);
struct {
return ok;
}
-static bool vmx_set_cell_config(struct cell *cell)
+static bool vcpu_set_cell_config(struct cell *cell)
{
u8 *io_bitmap;
bool ok = true;
ok &= vmcs_write64(APIC_ACCESS_ADDR,
paging_hvirt2phys(apic_access_page));
- ok &= vmx_set_cell_config(cpu_data->cell);
+ ok &= vcpu_set_cell_config(cpu_data->cell);
ok &= vmcs_write32(EXCEPTION_BITMAP, 0);
return ok;
}
-int vmx_cpu_init(struct per_cpu *cpu_data)
+int vcpu_init(struct per_cpu *cpu_data)
{
unsigned long cr4, feature_ctrl, mask;
u32 revision_id;
return 0;
}
-void vmx_cpu_exit(struct per_cpu *cpu_data)
+void vcpu_exit(struct per_cpu *cpu_data)
{
if (cpu_data->vmx_state == VMXOFF)
return;
write_cr4(read_cr4() & ~X86_CR4_VMXE);
}
-void vmx_cpu_activate_vmm(struct per_cpu *cpu_data)
+void vcpu_activate_vmm(struct per_cpu *cpu_data)
{
/* We enter Linux at the point arch_entry would return to as well.
* rax is cleared to signal success to the caller. */
__builtin_unreachable();
}
-static void vmx_cpu_reset(struct per_cpu *cpu_data, unsigned int sipi_vector)
+static void vcpu_reset(struct per_cpu *cpu_data, unsigned int sipi_vector)
{
unsigned long val;
bool ok = true;
val &= ~VM_ENTRY_IA32E_MODE;
ok &= vmcs_write32(VM_ENTRY_CONTROLS, val);
- ok &= vmx_set_cell_config(cpu_data->cell);
+ ok &= vcpu_set_cell_config(cpu_data->cell);
if (!ok) {
panic_printk("FATAL: CPU reset failed\n");
vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, pin_based_ctrl);
}
-void vmx_cpu_park(struct per_cpu *cpu_data)
+void vcpu_park(struct per_cpu *cpu_data)
{
- vmx_cpu_reset(cpu_data, 0);
+ vcpu_reset(cpu_data, 0);
vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_HLT);
}
vmcs_read32(VM_ENTRY_CONTROLS) | VM_ENTRY_IA32E_MODE);
}
-static void vmx_handle_hypercall(struct registers *guest_regs)
+static void vcpu_handle_hypercall(struct registers *guest_regs)
{
bool ia32e_mode = !!(vmcs_read64(GUEST_IA32_EFER) & EFER_LMA);
unsigned long arg_mask = ia32e_mode ? (u64)-1 : (u32)-1;
panic_printk("EFER: %p\n", vmcs_read64(GUEST_IA32_EFER));
}
-static bool vmx_handle_io_access(struct registers *guest_regs,
+static bool vcpu_handle_io_access(struct registers *guest_regs,
struct per_cpu *cpu_data)
{
/* parse exit qualification for I/O instructions (see SDM, 27.2.1 ) */
return false;
}
-static bool vmx_handle_ept_violation(struct registers *guest_regs,
+static bool vcpu_handle_pt_violation(struct registers *guest_regs,
struct per_cpu *cpu_data)
{
u64 phys_addr = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
result = pci_mmio_access_handler(cpu_data->cell, is_write,
phys_addr, &val);
if (result == 0)
- result = vtd_mmio_access_handler(is_write, phys_addr, &val);
+ result = iommu_mmio_access_handler(is_write, phys_addr, &val);
if (result == 1) {
if (!is_write)
return false;
}
-void vmx_handle_exit(struct registers *guest_regs, struct per_cpu *cpu_data)
+void vcpu_handle_exit(struct registers *guest_regs, struct per_cpu *cpu_data)
{
u32 reason = vmcs_read32(VM_EXIT_REASON);
int sipi_vector;
if (sipi_vector >= 0) {
printk("CPU %d received SIPI, vector %x\n",
cpu_data->cpu_id, sipi_vector);
- vmx_cpu_reset(cpu_data, sipi_vector);
+ vcpu_reset(cpu_data, sipi_vector);
memset(guest_regs, 0, sizeof(*guest_regs));
}
- vtd_check_pending_faults(cpu_data);
+ iommu_check_pending_faults(cpu_data);
return;
case EXIT_REASON_CPUID:
vmx_skip_emulated_instruction(X86_INST_LEN_CPUID);
(u32 *)&guest_regs->rcx, (u32 *)&guest_regs->rdx);
return;
case EXIT_REASON_VMCALL:
- vmx_handle_hypercall(guest_regs);
+ vcpu_handle_hypercall(guest_regs);
return;
case EXIT_REASON_CR_ACCESS:
cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_CR]++;
break;
case EXIT_REASON_IO_INSTRUCTION:
cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_PIO]++;
- if (vmx_handle_io_access(guest_regs, cpu_data))
+ if (vcpu_handle_io_access(guest_regs, cpu_data))
return;
break;
case EXIT_REASON_EPT_VIOLATION:
cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MMIO]++;
- if (vmx_handle_ept_violation(guest_regs, cpu_data))
+ if (vcpu_handle_pt_violation(guest_regs, cpu_data))
return;
break;
default:
panic_park();
}
-void vmx_entry_failure(struct per_cpu *cpu_data)
+void vcpu_entry_failure(struct per_cpu *cpu_data)
{
panic_printk("FATAL: vmresume failed, error %d\n",
vmcs_read32(VM_INSTRUCTION_ERROR));
* Jailhouse, a Linux-based partitioning hypervisor
*
* Copyright (c) Siemens AG, 2013, 2014
+ * Copyright (c) Valentine Sinitsyn, 2014
*
* Authors:
* Jan Kiszka <jan.kiszka@siemens.com>
+ * Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
#include <jailhouse/pci.h>
#include <jailhouse/printk.h>
#include <jailhouse/string.h>
+#include <asm/vtd.h>
+#include <asm/apic.h>
+#include <asm/iommu.h>
#include <asm/bitops.h>
#include <asm/ioapic.h>
#include <asm/spinlock.h>
printk(" Fault Reason: 0x%x Fault Info: %lx Type %d\n", fr, fi, type);
}
-void vtd_check_pending_faults(struct per_cpu *cpu_data)
+void iommu_check_pending_faults(struct per_cpu *cpu_data)
{
unsigned int fr_index;
void *reg_base = dmar_reg_base;
if (!irte_usage->used)
return 0;
- irq_msg = vtd_get_remapped_root_int(unit_no, irte_usage->device_id,
+ irq_msg = iommu_get_remapped_root_int(unit_no, irte_usage->device_id,
irte_usage->vector, index);
- return vtd_map_interrupt(&root_cell, irte_usage->device_id,
+ return iommu_map_interrupt(&root_cell, irte_usage->device_id,
irte_usage->vector, irq_msg);
}
return -1;
}
-int vtd_mmio_access_handler(bool is_write, u64 addr, u32 *value)
+int iommu_mmio_access_handler(bool is_write, u64 addr, u32 *value)
{
unsigned int n;
u64 base_addr;
return 0;
}
-int vtd_init(void)
+int iommu_init(void)
{
unsigned long version, caps, ecaps, ctrls, sllps_caps = ~0UL;
unsigned int pt_levels, num_did, n;
if (!(sllps_caps & VTD_CAP_SLLPS2M))
vtd_paging[dmar_pt_levels - 2].page_size = 0;
- return vtd_cell_init(&root_cell);
+ return iommu_cell_init(&root_cell);
}
static void vtd_update_irte(unsigned int index, union vtd_irte content)
}
}
-int vtd_add_pci_device(struct cell *cell, struct pci_device *device)
+int iommu_add_pci_device(struct cell *cell, struct pci_device *device)
{
unsigned int max_vectors = MAX(device->info->num_msi_vectors,
device->info->num_msix_vectors);
return -ENOMEM;
}
-void vtd_remove_pci_device(struct pci_device *device)
+void iommu_remove_pci_device(struct pci_device *device)
{
u16 bdf = device->info->bdf;
u64 *root_entry_lo = &root_entry_table[PCI_BUS(bdf)].lo_word;
page_free(&mem_pool, context_entry_table, 1);
}
-int vtd_cell_init(struct cell *cell)
+int iommu_cell_init(struct cell *cell)
{
const struct jailhouse_irqchip *irqchip =
jailhouse_cell_irqchips(cell->config);
result = vtd_reserve_int_remap_region(irqchip->id,
IOAPIC_NUM_PINS);
if (result < 0) {
- vtd_cell_exit(cell);
+ iommu_cell_exit(cell);
return result;
}
}
return 0;
}
-int vtd_map_memory_region(struct cell *cell,
+int iommu_map_memory_region(struct cell *cell,
const struct jailhouse_memory *mem)
{
u32 flags = 0;
PAGING_COHERENT);
}
-int vtd_unmap_memory_region(struct cell *cell,
+int iommu_unmap_memory_region(struct cell *cell,
const struct jailhouse_memory *mem)
{
// HACK for QEMU
}
struct apic_irq_message
-vtd_get_remapped_root_int(unsigned int iommu, u16 device_id,
+iommu_get_remapped_root_int(unsigned int iommu, u16 device_id,
unsigned int vector, unsigned int remap_index)
{
struct vtd_emulation *unit = &root_cell_units[iommu];
return irq_msg;
}
-int vtd_map_interrupt(struct cell *cell, u16 device_id, unsigned int vector,
+int iommu_map_interrupt(struct cell *cell, u16 device_id, unsigned int vector,
struct apic_irq_message irq_msg)
{
u32 dest = irq_msg.destination;
return base_index + vector;
}
-void vtd_cell_exit(struct cell *cell)
+void iommu_cell_exit(struct cell *cell)
{
// HACK for QEMU
if (dmar_units == 0)
*/
}
-void vtd_config_commit(struct cell *cell_added_removed)
+void iommu_config_commit(struct cell *cell_added_removed)
{
void *inv_queue = unit_inv_queue;
void *reg_base = dmar_reg_base;
mmio_write32(reg_base + VTD_FECTL_REG, unit->fectl);
}
-void vtd_shutdown(void)
+void iommu_shutdown(void)
{
void *reg_base = dmar_reg_base;
unsigned int n;