x86: Rename VMX/VTD public functions
author      Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
            Sat, 17 May 2014 18:12:45 +0000 (00:12 +0600)
committer   Jan Kiszka <jan.kiszka@siemens.com>
            Fri, 10 Oct 2014 11:47:37 +0000 (13:47 +0200)
In preparation for supporting different vendor-specific implementations of
the virtualization features, the public VMX/VTD functions were renamed.
The "vmx_" and "vtd_" prefixes are replaced by "vcpu_" and "iommu_", and
new header files were introduced to hold the declarations.

Signed-off-by: Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
hypervisor/arch/x86/control.c
hypervisor/arch/x86/entry.S
hypervisor/arch/x86/include/asm/iommu.h [new file with mode: 0644]
hypervisor/arch/x86/include/asm/vcpu.h [new file with mode: 0644]
hypervisor/arch/x86/include/asm/vmx.h
hypervisor/arch/x86/include/asm/vtd.h
hypervisor/arch/x86/ioapic.c
hypervisor/arch/x86/pci.c
hypervisor/arch/x86/setup.c
hypervisor/arch/x86/vmx.c
hypervisor/arch/x86/vtd.c
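
The point of the rename is that the generic vcpu_*/iommu_* names can later be
backed by more than one vendor implementation: callers such as control.c go
through the declarations in asm/vcpu.h and asm/iommu.h, while vmx.c (and, in
the future, an SVM backend) provides the definitions. The stand-alone sketch
below illustrates that dispatch pattern. It is illustrative only: svm.c did
not exist at this point, the CONFIG_VCPU_BACKEND_VMX macro is a stand-in for
whatever mechanism selects the backend, and the function bodies are
placeholders rather than Jailhouse code.

/*
 * Sketch of the dispatch pattern this rename prepares for.
 * Build with -DCONFIG_VCPU_BACKEND_VMX to pick the "VMX" stand-in.
 */
#include <stdio.h>

/* Generic interface (cf. asm/vcpu.h): vendor-neutral entry points. */
int vcpu_vendor_init(void);
void vcpu_tlb_flush(void);

#ifdef CONFIG_VCPU_BACKEND_VMX
/* cf. vmx.c: Intel backend, keeps VMX/EPT details private */
int vcpu_vendor_init(void)
{
	puts("vmx: enable VMX operation, set up EPT paging");
	return 0;
}

void vcpu_tlb_flush(void)
{
	puts("vmx: INVEPT");
}
#else
/* A future svm.c would define the same symbols with SVM/NPT primitives. */
int vcpu_vendor_init(void)
{
	puts("svm: enable EFER.SVME, set up nested paging");
	return 0;
}

void vcpu_tlb_flush(void)
{
	puts("svm: flush guest TLB");
}
#endif

/* Generic code (cf. arch_init_early()/x86_handle_events()) is unchanged. */
int main(void)
{
	if (vcpu_vendor_init())
		return 1;
	vcpu_tlb_flush();
	return 0;
}

Because vcpu.h and iommu.h declare plain functions rather than an ops table,
only one backend can supply the definitions in any given build; the selection
is made when the hypervisor is built, with no run-time indirection in the
callers.
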

diff --git a/hypervisor/arch/x86/control.c b/hypervisor/arch/x86/control.c
index b775b140e7e233584c084f9a6a59197261e8e995..8a8d9ca83b6dabe952a98daaf93360e4df539a2b 100644 (file)
@@ -17,8 +17,8 @@
 #include <asm/apic.h>
 #include <asm/control.h>
 #include <asm/ioapic.h>
-#include <asm/vmx.h>
-#include <asm/vtd.h>
+#include <asm/iommu.h>
+#include <asm/vcpu.h>
 
 struct exception_frame {
        u64 vector;
@@ -34,17 +34,17 @@ int arch_cell_create(struct cell *cell)
 {
        int err;
 
-       err = vmx_cell_init(cell);
+       err = vcpu_cell_init(cell);
        if (err)
                return err;
 
-       err = vtd_cell_init(cell);
+       err = iommu_cell_init(cell);
        if (err)
-               goto error_vmx_exit;
+               goto error_vm_exit;
 
        err = pci_cell_init(cell);
        if (err)
-               goto error_vtd_exit;
+               goto error_iommu_exit;
 
        ioapic_cell_init(cell);
 
@@ -53,10 +53,10 @@ int arch_cell_create(struct cell *cell)
 
        return 0;
 
-error_vtd_exit:
-       vtd_cell_exit(cell);
-error_vmx_exit:
-       vmx_cell_exit(cell);
+error_iommu_exit:
+       iommu_cell_exit(cell);
+error_vm_exit:
+       vcpu_cell_exit(cell);
        return err;
 }
 
@@ -65,13 +65,13 @@ int arch_map_memory_region(struct cell *cell,
 {
        int err;
 
-       err = vmx_map_memory_region(cell, mem);
+       err = vcpu_map_memory_region(cell, mem);
        if (err)
                return err;
 
-       err = vtd_map_memory_region(cell, mem);
+       err = iommu_map_memory_region(cell, mem);
        if (err)
-               vmx_unmap_memory_region(cell, mem);
+               vcpu_unmap_memory_region(cell, mem);
        return err;
 }
 
@@ -80,19 +80,19 @@ int arch_unmap_memory_region(struct cell *cell,
 {
        int err;
 
-       err = vtd_unmap_memory_region(cell, mem);
+       err = iommu_unmap_memory_region(cell, mem);
        if (err)
                return err;
 
-       return vmx_unmap_memory_region(cell, mem);
+       return vcpu_unmap_memory_region(cell, mem);
 }
 
 void arch_cell_destroy(struct cell *cell)
 {
        ioapic_cell_exit(cell);
        pci_cell_exit(cell);
-       vtd_cell_exit(cell);
-       vmx_cell_exit(cell);
+       iommu_cell_exit(cell);
+       vcpu_cell_exit(cell);
 }
 
 /* all root cell CPUs (except the calling one) have to be suspended */
@@ -108,9 +108,9 @@ void arch_config_commit(struct cell *cell_added_removed)
                                    current_cpu)
                        per_cpu(cpu)->flush_virt_caches = true;
 
-       vmx_invept();
+       vcpu_tlb_flush();
 
-       vtd_config_commit(cell_added_removed);
+       iommu_config_commit(cell_added_removed);
        pci_config_commit(cell_added_removed);
        ioapic_config_commit(cell_added_removed);
 }
@@ -120,7 +120,7 @@ void arch_shutdown(void)
        pci_prepare_handover();
        ioapic_prepare_handover();
 
-       vtd_shutdown();
+       iommu_shutdown();
        pci_shutdown();
        ioapic_shutdown();
 }
@@ -227,7 +227,7 @@ int x86_handle_events(struct per_cpu *cpu_data)
 
                if (cpu_data->shutdown_cpu) {
                        apic_clear(cpu_data);
-                       vmx_cpu_exit(cpu_data);
+                       vcpu_exit(cpu_data);
                        asm volatile("1: hlt; jmp 1b");
                }
 
@@ -246,7 +246,7 @@ int x86_handle_events(struct per_cpu *cpu_data)
 
        if (cpu_data->flush_virt_caches) {
                cpu_data->flush_virt_caches = false;
-               vmx_invept();
+               vcpu_tlb_flush();
        }
 
        spin_unlock(&cpu_data->control_lock);
@@ -254,7 +254,7 @@ int x86_handle_events(struct per_cpu *cpu_data)
        /* wait_for_sipi is only modified on this CPU, so checking outside of
         * control_lock is fine */
        if (cpu_data->wait_for_sipi)
-               vmx_cpu_park(cpu_data);
+               vcpu_park(cpu_data);
        else if (sipi_vector >= 0)
                apic_clear(cpu_data);
 
@@ -290,5 +290,5 @@ void arch_panic_park(void)
        x86_enter_wait_for_sipi(cpu_data);
        spin_unlock(&cpu_data->control_lock);
 
-       vmx_cpu_park(cpu_data);
+       vcpu_park(cpu_data);
 }
diff --git a/hypervisor/arch/x86/entry.S b/hypervisor/arch/x86/entry.S
index b339518de1699f1683a0a4e0a90a100960f0308f..05ed17c86d2141efb56d41b120bbc347b9f24a41 100644 (file)
@@ -159,7 +159,7 @@ vmx_vmexit:
 
        mov %rsp,%rdi
        lea -PERCPU_STACK_END+16*8(%rsp),%rsi
-       call vmx_handle_exit
+       call vcpu_handle_exit
 
        pop %r15
        pop %r14
@@ -181,4 +181,4 @@ vmx_vmexit:
        vmresume
 
        lea -PERCPU_STACK_END(%rsp),%rdi
-       jmp vmx_entry_failure
+       jmp vcpu_entry_failure
diff --git a/hypervisor/arch/x86/include/asm/iommu.h b/hypervisor/arch/x86/include/asm/iommu.h
new file mode 100644 (file)
index 0000000..ed19c4e
--- /dev/null
+++ b/hypervisor/arch/x86/include/asm/iommu.h
@@ -0,0 +1,53 @@
+/*
+ * Jailhouse, a Linux-based partitioning hypervisor
+ *
+ * Copyright (c) Valentine Sinitsyn, 2014
+ *
+ * Authors:
+ *  Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef _JAILHOUSE_ASM_IOMMU_H
+#define _JAILHOUSE_ASM_IOMMU_H
+
+#include <jailhouse/entry.h>
+#include <jailhouse/cell-config.h>
+#include <jailhouse/pci.h>
+#include <jailhouse/types.h>
+#include <asm/apic.h>
+#include <asm/cell.h>
+#include <asm/percpu.h>
+
+int iommu_init(void);
+
+int iommu_cell_init(struct cell *cell);
+int iommu_map_memory_region(struct cell *cell,
+                           const struct jailhouse_memory *mem);
+int iommu_unmap_memory_region(struct cell *cell,
+                           const struct jailhouse_memory *mem);
+int iommu_add_pci_device(struct cell *cell, struct pci_device *device);
+void iommu_remove_pci_device(struct pci_device *device);
+
+struct apic_irq_message iommu_get_remapped_root_int(unsigned int iommu,
+                                                   u16 device_id,
+                                                   unsigned int vector,
+                                                   unsigned int remap_index);
+int iommu_map_interrupt(struct cell *cell,
+                       u16 device_id,
+                       unsigned int vector,
+                       struct apic_irq_message irq_msg);
+
+void iommu_cell_exit(struct cell *cell);
+
+void iommu_config_commit(struct cell *cell_added_removed);
+
+void iommu_shutdown(void);
+
+void iommu_check_pending_faults(struct per_cpu *cpu_data);
+
+int iommu_mmio_access_handler(bool is_write, u64 addr, u32 *value);
+
+#endif
diff --git a/hypervisor/arch/x86/include/asm/vcpu.h b/hypervisor/arch/x86/include/asm/vcpu.h
new file mode 100644 (file)
index 0000000..b591954
--- /dev/null
+++ b/hypervisor/arch/x86/include/asm/vcpu.h
@@ -0,0 +1,45 @@
+/*
+ * Jailhouse, a Linux-based partitioning hypervisor
+ *
+ * Copyright (c) Valentine Sinitsyn, 2014
+ *
+ * Authors:
+ *  Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef _JAILHOUSE_ASM_VCPU_H
+#define _JAILHOUSE_ASM_VCPU_H
+
+#include <jailhouse/entry.h>
+#include <jailhouse/cell-config.h>
+#include <asm/cell.h>
+#include <asm/percpu.h>
+#include <asm/processor.h>
+
+int vcpu_vendor_init(void);
+
+int vcpu_cell_init(struct cell *cell);
+int vcpu_map_memory_region(struct cell *cell,
+                          const struct jailhouse_memory *mem);
+int vcpu_unmap_memory_region(struct cell *cell,
+                            const struct jailhouse_memory *mem);
+void vcpu_cell_exit(struct cell *cell);
+
+int vcpu_init(struct per_cpu *cpu_data);
+void vcpu_exit(struct per_cpu *cpu_data);
+
+void __attribute__((noreturn)) vcpu_activate_vmm(struct per_cpu *cpu_data);
+void vcpu_handle_exit(struct registers *guest_regs, struct per_cpu *cpu_data);
+
+void vcpu_park(struct per_cpu *cpu_data);
+
+void vcpu_nmi_handler(struct per_cpu *cpu_data);
+
+void vcpu_tlb_flush(void);
+
+void vcpu_entry_failure(struct per_cpu *cpu_data);
+
+#endif
diff --git a/hypervisor/arch/x86/include/asm/vmx.h b/hypervisor/arch/x86/include/asm/vmx.h
index cafe215c80f67d66fcc76554fbd89f0533bd7e95..62d4bd04c290138405121baf5fc1cf0b3e6f56fe 100644 (file)
 #ifndef _JAILHOUSE_ASM_VMX_H
 #define _JAILHOUSE_ASM_VMX_H
 
-#include <asm/cell.h>
 #include <asm/paging.h>
-#include <asm/processor.h>
-
-#include <jailhouse/cell-config.h>
 
 struct per_cpu;
 
@@ -301,26 +297,7 @@ enum vmx_state { VMXOFF = 0, VMXON, VMCS_READY };
 #define APIC_ACCESS_TYPE_LINEAR_READ           0x00000000
 #define APIC_ACCESS_TYPE_LINEAR_WRITE          0x00001000
 
-int vmx_init(void);
-
-int vmx_cell_init(struct cell *cell);
-int vmx_map_memory_region(struct cell *cell,
-                         const struct jailhouse_memory *mem);
-int vmx_unmap_memory_region(struct cell *cell,
-                           const struct jailhouse_memory *mem);
-void vmx_cell_exit(struct cell *cell);
-
-int vmx_cpu_init(struct per_cpu *cpu_data);
-void vmx_cpu_exit(struct per_cpu *cpu_data);
-
-void __attribute__((noreturn)) vmx_cpu_activate_vmm(struct per_cpu *cpu_data);
-void vmx_handle_exit(struct registers *guest_regs, struct per_cpu *cpu_data);
-void vmx_entry_failure(struct per_cpu *cpu_data);
-
-void vmx_invept(void);
-
 void vmx_schedule_vmexit(struct per_cpu *cpu_data);
-void vmx_cpu_park(struct per_cpu *cpu_data);
 
 void vmx_vmexit(void);
 
diff --git a/hypervisor/arch/x86/include/asm/vtd.h b/hypervisor/arch/x86/include/asm/vtd.h
index 1d401a93e86f1d7c41b744e2322577b0ecfbe42a..9e901ec2656733b4c02099fefa098deaaa086a19 100644 (file)
@@ -10,6 +10,9 @@
  * the COPYING file in the top-level directory.
  */
 
+#ifndef _JAILHOUSE_ASM_VTD_H
+#define _JAILHOUSE_ASM_VTD_H
+
 #include <jailhouse/pci.h>
 #include <jailhouse/utils.h>
 #include <asm/apic.h>
@@ -160,26 +163,4 @@ union vtd_irte {
 #define VTD_IRTE_SQ_VERIFY_FULL_SID    0x0
 #define VTD_IRTE_SVT_VERIFY_SID_SQ     0x1
 
-int vtd_init(void);
-
-int vtd_cell_init(struct cell *cell);
-int vtd_map_memory_region(struct cell *cell,
-                         const struct jailhouse_memory *mem);
-int vtd_unmap_memory_region(struct cell *cell,
-                           const struct jailhouse_memory *mem);
-int vtd_add_pci_device(struct cell *cell, struct pci_device *device);
-void vtd_remove_pci_device(struct pci_device *device);
-struct apic_irq_message
-vtd_get_remapped_root_int(unsigned int iommu, u16 device_id,
-                         unsigned int vector, unsigned int remap_index);
-int vtd_map_interrupt(struct cell *cell, u16 device_id, unsigned int vector,
-                     struct apic_irq_message irq_msg);
-void vtd_cell_exit(struct cell *cell);
-
-void vtd_config_commit(struct cell *cell_added_removed);
-
-void vtd_shutdown(void);
-
-void vtd_check_pending_faults(struct per_cpu *cpu_data);
-
-int vtd_mmio_access_handler(bool is_write, u64 addr, u32 *value);
+#endif
diff --git a/hypervisor/arch/x86/ioapic.c b/hypervisor/arch/x86/ioapic.c
index 1ece8d8efc2b89ba6c7e0face36daa503f531178..c9f5c504b53dc5bab71d0e69454f7488911755c1 100644 (file)
@@ -15,8 +15,8 @@
 #include <jailhouse/printk.h>
 #include <asm/apic.h>
 #include <asm/ioapic.h>
+#include <asm/iommu.h>
 #include <asm/spinlock.h>
-#include <asm/vtd.h>
 
 #include <jailhouse/cell-config.h>
 
@@ -73,16 +73,16 @@ ioapic_translate_redir_entry(struct cell *cell, unsigned int pin,
 
                idx = entry.remap.int_index | (entry.remap.int_index15 << 15);
 
-               return vtd_get_remapped_root_int(root_cell.ioapic_iommu,
-                                                root_cell.ioapic_id, pin,
-                                                idx);
+               return iommu_get_remapped_root_int(root_cell.ioapic_iommu,
+                                                  root_cell.ioapic_id, pin,
+                                                  idx);
        }
 
        irq_msg.vector = entry.native.vector;
        irq_msg.delivery_mode = entry.native.delivery_mode;
        irq_msg.level_triggered = entry.native.level_triggered;
        irq_msg.dest_logical = entry.native.dest_logical;
-       /* align redir_hint and dest_logical - required by vtd_map_interrupt */
+       /* align redir_hint and dest_logical - required by iommu_map_interrupt */
        irq_msg.redir_hint = irq_msg.dest_logical;
        irq_msg.valid = 1;
        irq_msg.destination = entry.native.destination;
@@ -115,7 +115,7 @@ static int ioapic_virt_redir_write(struct cell *cell, unsigned int reg,
 
        irq_msg = ioapic_translate_redir_entry(cell, pin, entry);
 
-       result = vtd_map_interrupt(cell, cell->ioapic_id, pin, irq_msg);
+       result = iommu_map_interrupt(cell, cell->ioapic_id, pin, irq_msg);
        // HACK for QEMU
        if (result == -ENOSYS) {
                ioapic_reg_write(reg, entry.raw[reg & 1]);
diff --git a/hypervisor/arch/x86/pci.c b/hypervisor/arch/x86/pci.c
index b7b60e411ee1046759e8c5ad3e1fe4833b251cc1..42fa8263fa225c03207af69f9a0ebdddb4c55f59 100644 (file)
@@ -18,8 +18,8 @@
 #include <jailhouse/utils.h>
 #include <asm/apic.h>
 #include <asm/io.h>
+#include <asm/iommu.h>
 #include <asm/pci.h>
-#include <asm/vtd.h>
 
 /** Protects the root bridge's PIO interface to the PCI config space. */
 static DEFINE_SPINLOCK(pci_lock);
@@ -221,12 +221,12 @@ invalid_access:
 
 int arch_pci_add_device(struct cell *cell, struct pci_device *device)
 {
-       return vtd_add_pci_device(cell, device);
+       return iommu_add_pci_device(cell, device);
 }
 
 void arch_pci_remove_device(struct pci_device *device)
 {
-       vtd_remove_pci_device(device);
+       iommu_remove_pci_device(device);
 }
 
 static union x86_msi_vector pci_get_x86_msi_vector(struct pci_device *device)
@@ -255,9 +255,9 @@ pci_translate_msi_vector(struct pci_device *device, unsigned int vector,
                idx = msi.remap.int_index | (msi.remap.int_index15 << 15);
                if (msi.remap.shv)
                        idx += msi.remap.subhandle;
-               return vtd_get_remapped_root_int(device->info->iommu,
-                                                device->info->bdf,
-                                                vector, idx);
+               return iommu_get_remapped_root_int(device->info->iommu,
+                                                  device->info->bdf,
+                                                  vector, idx);
        }
 
        irq_msg.vector = msi.native.vector;
@@ -337,7 +337,7 @@ int arch_pci_update_msi(struct pci_device *device,
 
        for (n = 0; n < vectors; n++) {
                irq_msg = pci_translate_msi_vector(device, n, vectors, msi);
-               result = vtd_map_interrupt(device->cell, bdf, n, irq_msg);
+               result = iommu_map_interrupt(device->cell, bdf, n, irq_msg);
                // HACK for QEMU
                if (result == -ENOSYS) {
                        for (n = 1; n < (info->msi_64bits ? 4 : 3); n++)
@@ -375,7 +375,7 @@ int arch_pci_update_msix_vector(struct pci_device *device, unsigned int index)
                return 0;
 
        irq_msg = pci_translate_msi_vector(device, index, 0, msi);
-       result = vtd_map_interrupt(device->cell, device->info->bdf, index,
+       result = iommu_map_interrupt(device->cell, device->info->bdf, index,
                                   irq_msg);
        // HACK for QEMU
        if (result == -ENOSYS) {
diff --git a/hypervisor/arch/x86/setup.c b/hypervisor/arch/x86/setup.c
index 1dbff4c6460c390b9d95d2bb142b2fe066285365..dcf06485aee17bbb3334774229afbdd70f7c5200 100644 (file)
@@ -19,8 +19,8 @@
 #include <asm/apic.h>
 #include <asm/bitops.h>
 #include <asm/ioapic.h>
-#include <asm/vmx.h>
-#include <asm/vtd.h>
+#include <asm/iommu.h>
+#include <asm/vcpu.h>
 
 #define IDT_PRESENT_INT                0x00008e00
 
@@ -79,13 +79,18 @@ int arch_init_early(void)
        for (vector = IRQ_DESC_START; vector < NUM_IDT_DESC; vector++)
                set_idt_int_gate(vector, (unsigned long)irq_entry);
 
-       err = vmx_init();
+       err = vcpu_vendor_init();
        if (err)
                return err;
 
        return 0;
 }
 
+/*
+ * TODO: Current struct segment is VMX-specific (with 32-bit access rights).
+ * We need a generic struct segment for x86 that is converted to VMX/SVM one
+ * in the vmx.c/svm.c.
+ */
 static void read_descriptor(struct per_cpu *cpu_data, struct segment *seg)
 {
        u64 *desc = (u64 *)(cpu_data->linux_gdtr.base +
@@ -202,7 +207,7 @@ int arch_cpu_init(struct per_cpu *cpu_data)
        if (err)
                goto error_out;
 
-       err = vmx_cpu_init(cpu_data);
+       err = vcpu_init(cpu_data);
        if (err)
                goto error_out;
 
@@ -217,7 +222,7 @@ int arch_init_late()
 {
        int err;
 
-       err = vtd_init();
+       err = iommu_init();
        if (err)
                return err;
 
@@ -238,7 +243,7 @@ int arch_init_late()
 
 void arch_cpu_activate_vmm(struct per_cpu *cpu_data)
 {
-       vmx_cpu_activate_vmm(cpu_data);
+       vcpu_activate_vmm(cpu_data);
 }
 
 void arch_cpu_restore(struct per_cpu *cpu_data)
@@ -248,7 +253,7 @@ void arch_cpu_restore(struct per_cpu *cpu_data)
        if (!cpu_data->initialized)
                return;
 
-       vmx_cpu_exit(cpu_data);
+       vcpu_exit(cpu_data);
 
        write_msr(MSR_EFER, cpu_data->linux_efer);
        write_cr3(cpu_data->linux_cr3);
diff --git a/hypervisor/arch/x86/vmx.c b/hypervisor/arch/x86/vmx.c
index 0165b907dcc38d69b72b32952289303303c025a4..1112cd966dcf6bd257769d7b8964721730f9a9f3 100644 (file)
@@ -24,7 +24,9 @@
 #include <asm/i8042.h>
 #include <asm/io.h>
 #include <asm/ioapic.h>
+#include <asm/iommu.h>
 #include <asm/pci.h>
+#include <asm/vcpu.h>
 #include <asm/vmx.h>
 #include <asm/vtd.h>
 
@@ -223,7 +225,7 @@ static void ept_set_next_pt(pt_entry_t pte, unsigned long next_pt)
                EPT_FLAG_WRITE | EPT_FLAG_EXECUTE;
 }
 
-int vmx_init(void)
+int vcpu_vendor_init(void)
 {
        unsigned int n;
        int err;
@@ -250,7 +252,7 @@ int vmx_init(void)
                msr_bitmap[VMX_MSR_BMP_0000_WRITE][MSR_X2APIC_ICR/8] = 0x01;
        }
 
-       return vmx_cell_init(&root_cell);
+       return vcpu_cell_init(&root_cell);
 }
 
 unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
@@ -260,7 +262,7 @@ unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
                                flags);
 }
 
-int vmx_cell_init(struct cell *cell)
+int vcpu_cell_init(struct cell *cell)
 {
        const u8 *pio_bitmap = jailhouse_cell_pio_bitmap(cell->config);
        u32 pio_bitmap_size = cell->config->pio_bitmap_size;
@@ -285,7 +287,7 @@ int vmx_cell_init(struct cell *cell)
                            EPT_FLAG_READ | EPT_FLAG_WRITE|EPT_FLAG_WB_TYPE,
                            PAGING_NON_COHERENT);
        if (err) {
-               vmx_cell_exit(cell);
+               vcpu_cell_exit(cell);
                return err;
        }
 
@@ -324,7 +326,7 @@ int vmx_cell_init(struct cell *cell)
        return 0;
 }
 
-int vmx_map_memory_region(struct cell *cell,
+int vcpu_map_memory_region(struct cell *cell,
                          const struct jailhouse_memory *mem)
 {
        u64 phys_start = mem->phys_start;
@@ -343,14 +345,14 @@ int vmx_map_memory_region(struct cell *cell,
                             mem->virt_start, flags, PAGING_NON_COHERENT);
 }
 
-int vmx_unmap_memory_region(struct cell *cell,
+int vcpu_unmap_memory_region(struct cell *cell,
                            const struct jailhouse_memory *mem)
 {
        return paging_destroy(&cell->vmx.ept_structs, mem->virt_start,
                              mem->size, PAGING_NON_COHERENT);
 }
 
-void vmx_cell_exit(struct cell *cell)
+void vcpu_cell_exit(struct cell *cell)
 {
        const u8 *root_pio_bitmap =
                jailhouse_cell_pio_bitmap(root_cell.config);
@@ -371,7 +373,7 @@ void vmx_cell_exit(struct cell *cell)
        page_free(&mem_pool, cell->vmx.ept_structs.root_table, 1);
 }
 
-void vmx_invept(void)
+void vcpu_tlb_flush(void)
 {
        unsigned long ept_cap = read_msr(MSR_IA32_VMX_EPT_VPID_CAP);
        struct {
@@ -430,7 +432,7 @@ static bool vmx_set_guest_cr(int cr, unsigned long val)
        return ok;
 }
 
-static bool vmx_set_cell_config(struct cell *cell)
+static bool vcpu_set_cell_config(struct cell *cell)
 {
        u8 *io_bitmap;
        bool ok = true;
@@ -565,7 +567,7 @@ static bool vmcs_setup(struct per_cpu *cpu_data)
        ok &= vmcs_write64(APIC_ACCESS_ADDR,
                           paging_hvirt2phys(apic_access_page));
 
-       ok &= vmx_set_cell_config(cpu_data->cell);
+       ok &= vcpu_set_cell_config(cpu_data->cell);
 
        ok &= vmcs_write32(EXCEPTION_BITMAP, 0);
 
@@ -589,7 +591,7 @@ static bool vmcs_setup(struct per_cpu *cpu_data)
        return ok;
 }
 
-int vmx_cpu_init(struct per_cpu *cpu_data)
+int vcpu_init(struct per_cpu *cpu_data)
 {
        unsigned long cr4, feature_ctrl, mask;
        u32 revision_id;
@@ -644,7 +646,7 @@ int vmx_cpu_init(struct per_cpu *cpu_data)
        return 0;
 }
 
-void vmx_cpu_exit(struct per_cpu *cpu_data)
+void vcpu_exit(struct per_cpu *cpu_data)
 {
        if (cpu_data->vmx_state == VMXOFF)
                return;
@@ -659,7 +661,7 @@ void vmx_cpu_exit(struct per_cpu *cpu_data)
        write_cr4(read_cr4() & ~X86_CR4_VMXE);
 }
 
-void vmx_cpu_activate_vmm(struct per_cpu *cpu_data)
+void vcpu_activate_vmm(struct per_cpu *cpu_data)
 {
        /* We enter Linux at the point arch_entry would return to as well.
         * rax is cleared to signal success to the caller. */
@@ -741,7 +743,7 @@ vmx_cpu_deactivate_vmm(struct registers *guest_regs)
        __builtin_unreachable();
 }
 
-static void vmx_cpu_reset(struct per_cpu *cpu_data, unsigned int sipi_vector)
+static void vcpu_reset(struct per_cpu *cpu_data, unsigned int sipi_vector)
 {
        unsigned long val;
        bool ok = true;
@@ -823,7 +825,7 @@ static void vmx_cpu_reset(struct per_cpu *cpu_data, unsigned int sipi_vector)
        val &= ~VM_ENTRY_IA32E_MODE;
        ok &= vmcs_write32(VM_ENTRY_CONTROLS, val);
 
-       ok &= vmx_set_cell_config(cpu_data->cell);
+       ok &= vcpu_set_cell_config(cpu_data->cell);
 
        if (!ok) {
                panic_printk("FATAL: CPU reset failed\n");
@@ -843,9 +845,9 @@ void vmx_schedule_vmexit(struct per_cpu *cpu_data)
        vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, pin_based_ctrl);
 }
 
-void vmx_cpu_park(struct per_cpu *cpu_data)
+void vcpu_park(struct per_cpu *cpu_data)
 {
-       vmx_cpu_reset(cpu_data, 0);
+       vcpu_reset(cpu_data, 0);
        vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_HLT);
 }
 
@@ -875,7 +877,7 @@ static void update_efer(void)
                     vmcs_read32(VM_ENTRY_CONTROLS) | VM_ENTRY_IA32E_MODE);
 }
 
-static void vmx_handle_hypercall(struct registers *guest_regs)
+static void vcpu_handle_hypercall(struct registers *guest_regs)
 {
        bool ia32e_mode = !!(vmcs_read64(GUEST_IA32_EFER) & EFER_LMA);
        unsigned long arg_mask = ia32e_mode ? (u64)-1 : (u32)-1;
@@ -1018,7 +1020,7 @@ static void dump_guest_regs(struct registers *guest_regs)
        panic_printk("EFER: %p\n", vmcs_read64(GUEST_IA32_EFER));
 }
 
-static bool vmx_handle_io_access(struct registers *guest_regs,
+static bool vcpu_handle_io_access(struct registers *guest_regs,
                                 struct per_cpu *cpu_data)
 {
        /* parse exit qualification for I/O instructions (see SDM, 27.2.1 ) */
@@ -1051,7 +1053,7 @@ invalid_access:
        return false;
 }
 
-static bool vmx_handle_ept_violation(struct registers *guest_regs,
+static bool vcpu_handle_pt_violation(struct registers *guest_regs,
                                     struct per_cpu *cpu_data)
 {
        u64 phys_addr = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
@@ -1082,7 +1084,7 @@ static bool vmx_handle_ept_violation(struct registers *guest_regs,
                result = pci_mmio_access_handler(cpu_data->cell, is_write,
                                                 phys_addr, &val);
        if (result == 0)
-               result = vtd_mmio_access_handler(is_write, phys_addr, &val);
+               result = iommu_mmio_access_handler(is_write, phys_addr, &val);
 
        if (result == 1) {
                if (!is_write)
@@ -1100,7 +1102,7 @@ invalid_access:
        return false;
 }
 
-void vmx_handle_exit(struct registers *guest_regs, struct per_cpu *cpu_data)
+void vcpu_handle_exit(struct registers *guest_regs, struct per_cpu *cpu_data)
 {
        u32 reason = vmcs_read32(VM_EXIT_REASON);
        int sipi_vector;
@@ -1118,10 +1120,10 @@ void vmx_handle_exit(struct registers *guest_regs, struct per_cpu *cpu_data)
                if (sipi_vector >= 0) {
                        printk("CPU %d received SIPI, vector %x\n",
                               cpu_data->cpu_id, sipi_vector);
-                       vmx_cpu_reset(cpu_data, sipi_vector);
+                       vcpu_reset(cpu_data, sipi_vector);
                        memset(guest_regs, 0, sizeof(*guest_regs));
                }
-               vtd_check_pending_faults(cpu_data);
+               iommu_check_pending_faults(cpu_data);
                return;
        case EXIT_REASON_CPUID:
                vmx_skip_emulated_instruction(X86_INST_LEN_CPUID);
@@ -1133,7 +1135,7 @@ void vmx_handle_exit(struct registers *guest_regs, struct per_cpu *cpu_data)
                        (u32 *)&guest_regs->rcx, (u32 *)&guest_regs->rdx);
                return;
        case EXIT_REASON_VMCALL:
-               vmx_handle_hypercall(guest_regs);
+               vcpu_handle_hypercall(guest_regs);
                return;
        case EXIT_REASON_CR_ACCESS:
                cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_CR]++;
@@ -1186,12 +1188,12 @@ void vmx_handle_exit(struct registers *guest_regs, struct per_cpu *cpu_data)
                break;
        case EXIT_REASON_IO_INSTRUCTION:
                cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_PIO]++;
-               if (vmx_handle_io_access(guest_regs, cpu_data))
+               if (vcpu_handle_io_access(guest_regs, cpu_data))
                        return;
                break;
        case EXIT_REASON_EPT_VIOLATION:
                cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MMIO]++;
-               if (vmx_handle_ept_violation(guest_regs, cpu_data))
+               if (vcpu_handle_pt_violation(guest_regs, cpu_data))
                        return;
                break;
        default:
@@ -1206,7 +1208,7 @@ void vmx_handle_exit(struct registers *guest_regs, struct per_cpu *cpu_data)
        panic_park();
 }
 
-void vmx_entry_failure(struct per_cpu *cpu_data)
+void vcpu_entry_failure(struct per_cpu *cpu_data)
 {
        panic_printk("FATAL: vmresume failed, error %d\n",
                     vmcs_read32(VM_INSTRUCTION_ERROR));
diff --git a/hypervisor/arch/x86/vtd.c b/hypervisor/arch/x86/vtd.c
index 92dd4092029036875e549a36eade88e88b6120fd..291f7db361bc51757e629537a2a0ff1021c6e731 100644 (file)
@@ -2,9 +2,11 @@
  * Jailhouse, a Linux-based partitioning hypervisor
  *
  * Copyright (c) Siemens AG, 2013, 2014
+ * Copyright (c) Valentine Sinitsyn, 2014
  *
  * Authors:
  *  Jan Kiszka <jan.kiszka@siemens.com>
+ *  Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
  *
  * This work is licensed under the terms of the GNU GPL, version 2.  See
  * the COPYING file in the top-level directory.
@@ -16,6 +18,9 @@
 #include <jailhouse/pci.h>
 #include <jailhouse/printk.h>
 #include <jailhouse/string.h>
+#include <asm/vtd.h>
+#include <asm/apic.h>
+#include <asm/iommu.h>
 #include <asm/bitops.h>
 #include <asm/ioapic.h>
 #include <asm/spinlock.h>
@@ -206,7 +211,7 @@ static void vtd_print_fault_record_reg_status(void *reg_base)
        printk(" Fault Reason: 0x%x Fault Info: %lx Type %d\n", fr, fi, type);
 }
 
-void vtd_check_pending_faults(struct per_cpu *cpu_data)
+void iommu_check_pending_faults(struct per_cpu *cpu_data)
 {
        unsigned int fr_index;
        void *reg_base = dmar_reg_base;
@@ -241,9 +246,9 @@ static int vtd_emulate_inv_int(unsigned int unit_no, unsigned int index)
        if (!irte_usage->used)
                return 0;
 
-       irq_msg = vtd_get_remapped_root_int(unit_no, irte_usage->device_id,
+       irq_msg = iommu_get_remapped_root_int(unit_no, irte_usage->device_id,
                                            irte_usage->vector, index);
-       return vtd_map_interrupt(&root_cell, irte_usage->device_id,
+       return iommu_map_interrupt(&root_cell, irte_usage->device_id,
                                 irte_usage->vector, irq_msg);
 }
 
@@ -333,7 +338,7 @@ invalid_iq_entry:
        return -1;
 }
 
-int vtd_mmio_access_handler(bool is_write, u64 addr, u32 *value)
+int iommu_mmio_access_handler(bool is_write, u64 addr, u32 *value)
 {
        unsigned int n;
        u64 base_addr;
@@ -430,7 +435,7 @@ static int vtd_init_ir_emulation(void *reg_base, unsigned int unit_no)
        return 0;
 }
 
-int vtd_init(void)
+int iommu_init(void)
 {
        unsigned long version, caps, ecaps, ctrls, sllps_caps = ~0UL;
        unsigned int pt_levels, num_did, n;
@@ -538,7 +543,7 @@ int vtd_init(void)
        if (!(sllps_caps & VTD_CAP_SLLPS2M))
                vtd_paging[dmar_pt_levels - 2].page_size = 0;
 
-       return vtd_cell_init(&root_cell);
+       return iommu_cell_init(&root_cell);
 }
 
 static void vtd_update_irte(unsigned int index, union vtd_irte content)
@@ -631,7 +636,7 @@ static void vtd_free_int_remap_region(u16 device_id, unsigned int length)
        }
 }
 
-int vtd_add_pci_device(struct cell *cell, struct pci_device *device)
+int iommu_add_pci_device(struct cell *cell, struct pci_device *device)
 {
        unsigned int max_vectors = MAX(device->info->num_msi_vectors,
                                       device->info->num_msix_vectors);
@@ -675,7 +680,7 @@ error_nomem:
        return -ENOMEM;
 }
 
-void vtd_remove_pci_device(struct pci_device *device)
+void iommu_remove_pci_device(struct pci_device *device)
 {
        u16 bdf = device->info->bdf;
        u64 *root_entry_lo = &root_entry_table[PCI_BUS(bdf)].lo_word;
@@ -705,7 +710,7 @@ void vtd_remove_pci_device(struct pci_device *device)
        page_free(&mem_pool, context_entry_table, 1);
 }
 
-int vtd_cell_init(struct cell *cell)
+int iommu_cell_init(struct cell *cell)
 {
        const struct jailhouse_irqchip *irqchip =
                jailhouse_cell_irqchips(cell->config);
@@ -729,7 +734,7 @@ int vtd_cell_init(struct cell *cell)
                result = vtd_reserve_int_remap_region(irqchip->id,
                                                      IOAPIC_NUM_PINS);
                if (result < 0) {
-                       vtd_cell_exit(cell);
+                       iommu_cell_exit(cell);
                        return result;
                }
        }
@@ -739,7 +744,7 @@ int vtd_cell_init(struct cell *cell)
        return 0;
 }
 
-int vtd_map_memory_region(struct cell *cell,
+int iommu_map_memory_region(struct cell *cell,
                          const struct jailhouse_memory *mem)
 {
        u32 flags = 0;
@@ -761,7 +766,7 @@ int vtd_map_memory_region(struct cell *cell,
                             PAGING_COHERENT);
 }
 
-int vtd_unmap_memory_region(struct cell *cell,
+int iommu_unmap_memory_region(struct cell *cell,
                            const struct jailhouse_memory *mem)
 {
        // HACK for QEMU
@@ -776,7 +781,7 @@ int vtd_unmap_memory_region(struct cell *cell,
 }
 
 struct apic_irq_message
-vtd_get_remapped_root_int(unsigned int iommu, u16 device_id,
+iommu_get_remapped_root_int(unsigned int iommu, u16 device_id,
                          unsigned int vector, unsigned int remap_index)
 {
        struct vtd_emulation *unit = &root_cell_units[iommu];
@@ -826,7 +831,7 @@ vtd_get_remapped_root_int(unsigned int iommu, u16 device_id,
        return irq_msg;
 }
 
-int vtd_map_interrupt(struct cell *cell, u16 device_id, unsigned int vector,
+int iommu_map_interrupt(struct cell *cell, u16 device_id, unsigned int vector,
                      struct apic_irq_message irq_msg)
 {
        u32 dest = irq_msg.destination;
@@ -901,7 +906,7 @@ update_irte:
        return base_index + vector;
 }
 
-void vtd_cell_exit(struct cell *cell)
+void iommu_cell_exit(struct cell *cell)
 {
        // HACK for QEMU
        if (dmar_units == 0)
@@ -915,7 +920,7 @@ void vtd_cell_exit(struct cell *cell)
         */
 }
 
-void vtd_config_commit(struct cell *cell_added_removed)
+void iommu_config_commit(struct cell *cell_added_removed)
 {
        void *inv_queue = unit_inv_queue;
        void *reg_base = dmar_reg_base;
@@ -975,7 +980,7 @@ static void vtd_restore_ir(unsigned int unit_no, void *reg_base)
        mmio_write32(reg_base + VTD_FECTL_REG, unit->fectl);
 }
 
-void vtd_shutdown(void)
+void iommu_shutdown(void)
 {
        void *reg_base = dmar_reg_base;
        unsigned int n;