#include <jailhouse/string.h>
#include <jailhouse/control.h>
#include <jailhouse/hypercall.h>
+#include <jailhouse/mmio.h>
+#include <jailhouse/pci.h>
#include <asm/apic.h>
#include <asm/control.h>
#include <asm/vmx.h>
return false;
}
+/**
+ * vmx_handle_ept_violation() - Emulate MMIO accesses that caused an EPT
+ * violation exit
+ * @guest_regs:	Guest register file
+ * @cpu_data:	Per-CPU state of the exiting CPU
+ *
+ * Return: true if the access was emulated (instruction skipped), false if
+ * it must be treated as a fatal, unhandled access.
+ */
+static bool vmx_handle_ept_violation(struct registers *guest_regs,
+				     struct per_cpu *cpu_data)
+{
+	u64 phys_addr = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
+	u64 exitq = vmcs_read64(EXIT_QUALIFICATION);
+	struct guest_paging_structures pg_structs;
+	struct mmio_access access;
+	bool is_write;
+
+	/* We don't enable dirty/accessed bit updates in EPTP, so only the
+	 * read or the write flag can be set, not both. */
+	is_write = !!(exitq & 0x2);
+
+	if (!vmx_get_guest_paging_structs(&pg_structs))
+		return false;
+	/* Decode the faulting instruction; only 32-bit accesses are
+	 * emulated. */
+	access = mmio_parse(cpu_data, vmcs_read64(GUEST_RIP),
+			    &pg_structs, is_write);
+	if (!access.inst_len || access.size != 4)
+		return false;
+
+	/* Filter out requests to PCI configuration space */
+	if (pci_mmio_access_handler(guest_regs, cpu_data->cell,
+				    is_write, phys_addr, access.reg) == 1) {
+		vmx_skip_emulated_instruction(
+				vmcs_read64(VM_EXIT_INSTRUCTION_LEN));
+		return true;
+	}
+
+	/* Report the actual direction: write when the write flag was set. */
+	panic_printk("FATAL: Invalid EPT %s, addr: %p\n",
+		     is_write ? "write" : "read", phys_addr);
+
+	return false;
+}
+
void vmx_handle_exit(struct registers *guest_regs, struct per_cpu *cpu_data)
{
u32 reason = vmcs_read32(VM_EXIT_REASON);
if (vmx_handle_io_access(guest_regs, cpu_data))
return;
break;
+ case EXIT_REASON_EPT_VIOLATION:
+ if (vmx_handle_ept_violation(guest_regs, cpu_data))
+ return;
+ break;
default:
panic_printk("FATAL: Unhandled VM-Exit, reason %d, ",
(u16)reason);
* the COPYING file in the top-level directory.
*/
+#include <jailhouse/acpi.h>
#include <jailhouse/pci.h>
+#include <jailhouse/printk.h>
#include <jailhouse/utils.h>
+/* One MCFG allocation entry: describes a memory-mapped PCI config space
+ * (ECAM) region for a range of buses within one PCI segment. Layout is
+ * dictated by the ACPI MCFG table, hence packed. */
+struct acpi_mcfg_alloc {
+	u64 base_addr;		/* physical base of the ECAM region */
+	u16 segment_num;	/* PCI segment group number */
+	u8 start_bus;		/* first bus number covered */
+	u8 end_bus;		/* last bus number covered (inclusive) */
+	u32 reserved;
+} __attribute__((packed));
+
+/* ACPI MCFG table: standard header followed by a variable number of
+ * allocation entries. */
+struct acpi_mcfg_table {
+	struct acpi_table_header header;
+	u8 reserved[8];
+	struct acpi_mcfg_alloc alloc_structs[];
+} __attribute__((packed));
+
/* entry for PCI config space whitelist (granting access) */
struct pci_cfg_access {
u32 reg_num; /** Register number (4-byte aligned) */
{ 0x3c, 0xffff00ff }, /* Int Line, Bridge Control */
};
+/* hypervisor-virtual mapping of the PCI MMCONFIG region, NULL if absent */
+static void *pci_space;
+/* physical base address of the MMCONFIG region, taken from the ACPI MCFG */
+static u64 pci_mmcfg_addr;
+/* size of the MMCONFIG region in bytes */
+static u32 pci_mmcfg_size;
+
/**
* pci_get_assigned_device() - Look up device owned by a cell
* @cell: Owning cell
return false;
}
+
+/**
+ * pci_init() - Initialization of PCI module
+ *
+ * Parses the ACPI MCFG table and maps the described MMCONFIG (ECAM)
+ * region into the hypervisor address space.
+ *
+ * Return: 0 - success (also when no MCFG table exists),
+ *         error code - if error.
+ */
+int pci_init(void)
+{
+	struct acpi_mcfg_table *mcfg;
+
+	mcfg = (struct acpi_mcfg_table *)acpi_find_table("MCFG", NULL);
+	if (!mcfg)
+		return 0;
+
+	/* We only support a single allocation entry (one segment) */
+	if (mcfg->header.length !=
+	    sizeof(struct acpi_mcfg_table) + sizeof(struct acpi_mcfg_alloc))
+		return -EIO;
+
+	pci_mmcfg_addr = mcfg->alloc_structs[0].base_addr;
+	/* end_bus is inclusive, so the region spans end - start + 1 buses,
+	 * each with 256 devfns of 4K config space */
+	pci_mmcfg_size = (mcfg->alloc_structs[0].end_bus -
+			  mcfg->alloc_structs[0].start_bus + 1) * 256 * 4096;
+	pci_space = page_alloc(&remap_pool, pci_mmcfg_size / PAGE_SIZE);
+	if (!pci_space)
+		return -ENOMEM;
+
+	return page_map_create(&hv_paging_structs,
+			       mcfg->alloc_structs[0].base_addr,
+			       pci_mmcfg_size, (unsigned long)pci_space,
+			       PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED,
+			       PAGE_MAP_NON_COHERENT);
+}
+
+/**
+ * pci_mmio_access_handler() - Handler for MMIO-accesses to PCI config space
+ * @guest_regs: Guest register file; @reg indexes into it
+ * @cell:	Request issuing cell
+ * @is_write:	True if write access
+ * @addr:	Address accessed
+ * @reg:	Index of the guest register that is the source (write) or
+ *		destination (read) of the access
+ *
+ * Return: 1 if handled successfully, 0 if the access does not target the
+ * MMCONFIG region.
+ */
+int pci_mmio_access_handler(struct registers *guest_regs,
+			    const struct cell *cell, bool is_write,
+			    u64 addr, u32 reg)
+{
+	const struct jailhouse_pci_device *device;
+	u32 mmcfg_offset;
+	u32 reg_num;
+	u32 reg_bias;
+
+	/* Not ours if no MMCONFIG region is mapped or if the 4-byte access
+	 * does not fall completely inside it. The pci_space check also
+	 * prevents the unsigned base+size-4 computation from underflowing
+	 * when no MCFG table was found. */
+	if (!pci_space || addr < pci_mmcfg_addr ||
+	    addr > (pci_mmcfg_addr + pci_mmcfg_size - 4))
+		return 0;
+
+	mmcfg_offset = addr - pci_mmcfg_addr;
+	reg_bias = mmcfg_offset % 4;	/* byte offset inside the dword */
+	reg_num = mmcfg_offset & 0xfff;	/* offset inside the config space */
+	/* bits 12+ of the offset encode bus/device/function */
+	device = pci_get_assigned_device(cell, mmcfg_offset >> 12);
+
+	if (is_write) {
+		/* silently drop writes to devices the cell does not own */
+		if (!device)
+			return 1;
+
+		/* only whitelisted header registers may be written */
+		if (reg_num < PCI_CONFIG_HEADER_SIZE)
+			if (pci_cfg_write_allowed(device->type,
+						  (reg_num - reg_bias),
+						  reg_bias, 4))
+				*(volatile u32 *)(pci_space + mmcfg_offset) =
+					((u64 *)guest_regs)[reg];
+	} else
+		if (device)
+			((u64 *)guest_regs)[reg] =
+				*(volatile u32 *)(pci_space + mmcfg_offset);
+		else
+			/* unassigned devices read as all-ones */
+			((u64 *)guest_regs)[reg] = BYTE_MASK(4);
+
+	return 1;
+}