/*
* Jailhouse, a Linux-based partitioning hypervisor
*
- * Copyright (c) Siemens AG, 2014
+ * Copyright (c) Siemens AG, 2014, 2015
*
* Authors:
* Ivan Kolchin <ivan.kolchin@siemens.com>
(counter) < (dev)->info->num_caps; \
(cap)++, (counter)++)
-/* entry for PCI config space whitelist (granting access) */
-struct pci_cfg_access {
- u32 reg_num; /* Register number (4-byte aligned) */
- u32 mask; /* Bit set: access allowed */
+/* entry for PCI config space access control */
+struct pci_cfg_control {
+ enum {
+ PCI_CONFIG_DENY,
+ PCI_CONFIG_ALLOW,
+ PCI_CONFIG_RDONLY,
+ } type; /* Access type */
+ u32 mask; /* Bit set: access type applies; bit cleared: deny access */
};
-/* --- Whilelists for writing to PCI config space registers --- */
+/* --- Access control for writing to PCI config space registers --- */
/* Type 1: Endpoints */
-static const struct pci_cfg_access endpoint_write_access[] = {
- { 0x04, 0xffffffff }, /* Command, Status */
- { 0x0c, 0xff00ffff }, /* BIST, Latency Timer, Cacheline */
- { 0x3c, 0x000000ff }, /* Int Line */
+static const struct pci_cfg_control endpoint_write[PCI_CONFIG_HEADER_SIZE] = {
+ [0x04/4] = {PCI_CONFIG_ALLOW, 0xffffffff}, /* Command, Status */
+ [0x0c/4] = {PCI_CONFIG_ALLOW, 0xff00ffff}, /* BIST, Lat., Cacheline */
+ [0x30/4] = {PCI_CONFIG_RDONLY, 0xffffffff}, /* ROM BAR */
+ [0x3c/4] = {PCI_CONFIG_ALLOW, 0x000000ff}, /* Int Line */
};
-/* Type 2: Bridges */
-static const struct pci_cfg_access bridge_write_access[] = {
- { 0x04, 0xffffffff }, /* Command, Status */
- { 0x0c, 0xff00ffff }, /* BIST, Latency Timer, Cacheline */
- { 0x3c, 0xffff00ff }, /* Int Line, Bridge Control */
+
+/* Type 2: Bridges
+ * Note: Ignore limit/base reprogramming attempts because the root cell will
+ * perform them on bus rescans. */
+static const struct pci_cfg_control bridge_write[PCI_CONFIG_HEADER_SIZE] = {
+ [0x04/4] = {PCI_CONFIG_ALLOW, 0xffffffff}, /* Command, Status */
+ [0x0c/4] = {PCI_CONFIG_ALLOW, 0xff00ffff}, /* BIST, Lat., Cacheline */
+ [0x1c/4] = {PCI_CONFIG_RDONLY, 0x0000ffff}, /* I/O Limit & Base */
+ [0x20/4 ... /* Memory Limit/Base, Prefetch Memory Limit/Base, */
+	 0x30/4] = {PCI_CONFIG_RDONLY, 0xffffffff}, /* Pref. & I/O Upper */
+ [0x3c/4] = {PCI_CONFIG_ALLOW, 0xffff00ff}, /* Int Line, Bridge Ctrl */
};
static void *pci_space;
-static u64 mmcfg_start, mmcfg_end;
+static u64 mmcfg_start, mmcfg_size;
static u8 end_bus;
+unsigned int pci_mmio_count_regions(struct cell *cell)
+{
+ const struct jailhouse_pci_device *dev_infos =
+ jailhouse_cell_pci_devices(cell->config);
+ unsigned int n, regions = 0;
+
+ if (system_config->platform_info.x86.mmconfig_base)
+ regions++;
+
+ for (n = 0; n < cell->config->num_pci_devices; n++)
+ if (dev_infos[n].type == JAILHOUSE_PCI_TYPE_IVSHMEM)
+ regions += PCI_IVSHMEM_NUM_MMIO_REGIONS;
+ else if (dev_infos[n].msix_address)
+ regions++;
+
+ return regions;
+}
+
static void *pci_get_device_mmcfg_base(u16 bdf)
{
return pci_space + ((unsigned long)bdf << 12);
unsigned int size, u32 *value)
{
const struct jailhouse_pci_capability *cap;
- unsigned int cap_offs;
+ unsigned int bar_no, cap_offs;
if (!device) {
*value = -1;
return PCI_ACCESS_DONE;
}
+ /* Emulate BARs for physical and virtual devices */
+ if (device->info->type != JAILHOUSE_PCI_TYPE_BRIDGE) {
+ /* Emulate BAR access, always returning the shadow value. */
+ if (address >= PCI_CFG_BAR && address <= PCI_CFG_BAR_END) {
+ bar_no = (address - PCI_CFG_BAR) / 4;
+ *value = device->bar[bar_no] >> ((address % 4) * 8);
+ return PCI_ACCESS_DONE;
+ }
+
+ /* We do not expose ROMs. */
+ if (address >= PCI_CFG_ROMBAR && address < PCI_CFG_CAPS) {
+ *value = 0;
+ return PCI_ACCESS_DONE;
+ }
+ }
+
if (device->info->type == JAILHOUSE_PCI_TYPE_IVSHMEM)
- return pci_ivshmem_cfg_read(device, address, size, value);
+ return pci_ivshmem_cfg_read(device, address, value);
if (address < PCI_CONFIG_HEADER_SIZE)
return PCI_ACCESS_PERFORM;
{
const struct jailhouse_pci_capability *cap;
- /* initialize list to work around wrong compiler warning */
- const struct pci_cfg_access *list = NULL;
unsigned int bias_shift = (address % 4) * 8;
u32 mask = BYTE_MASK(size) << bias_shift;
- unsigned int n, cap_offs, len = 0;
+ struct pci_cfg_control cfg_control;
+ unsigned int bar_no, cap_offs;
if (!device)
return PCI_ACCESS_REJECT;
- if (device->info->type == JAILHOUSE_PCI_TYPE_IVSHMEM)
- return pci_ivshmem_cfg_write(device, address, size, value);
+ value <<= bias_shift;
+
+ /* Emulate BARs for physical and virtual devices */
+ if (device->info->type != JAILHOUSE_PCI_TYPE_BRIDGE &&
+ address >= PCI_CFG_BAR && address <= PCI_CFG_BAR_END) {
+ bar_no = (address - PCI_CFG_BAR) / 4;
+ mask &= device->info->bar_mask[bar_no];
+ device->bar[bar_no] &= ~mask;
+ device->bar[bar_no] |= value & mask;
+ return PCI_ACCESS_DONE;
+ }
if (address < PCI_CONFIG_HEADER_SIZE) {
- if (device->info->type == JAILHOUSE_PCI_TYPE_DEVICE) {
- list = endpoint_write_access;
- len = ARRAY_SIZE(endpoint_write_access);
- } else if (device->info->type == JAILHOUSE_PCI_TYPE_BRIDGE) {
- list = bridge_write_access;
- len = ARRAY_SIZE(bridge_write_access);
- }
+ if (device->info->type == JAILHOUSE_PCI_TYPE_BRIDGE)
+ cfg_control = bridge_write[address / 4];
+ else /* physical or virtual device */
+ cfg_control = endpoint_write[address / 4];
- for (n = 0; n < len; n++) {
- if (list[n].reg_num == (address & 0xffc) &&
- (list[n].mask & mask) == mask)
- return PCI_ACCESS_PERFORM;
- }
+ if ((cfg_control.mask & mask) != mask)
+ return PCI_ACCESS_REJECT;
- // HACK to allow PCI bus rescanning in root-cell
- if (device->info->type == JAILHOUSE_PCI_TYPE_BRIDGE &&
- device->cell == &root_cell)
+ switch (cfg_control.type) {
+ case PCI_CONFIG_ALLOW:
+ if (device->info->type == JAILHOUSE_PCI_TYPE_IVSHMEM)
+ return pci_ivshmem_cfg_write(device,
+ address / 4, mask, value);
+ return PCI_ACCESS_PERFORM;
+ case PCI_CONFIG_RDONLY:
return PCI_ACCESS_DONE;
-
- return PCI_ACCESS_REJECT;
+ default:
+ return PCI_ACCESS_REJECT;
+ }
}
+ if (device->info->type == JAILHOUSE_PCI_TYPE_IVSHMEM)
+ return pci_ivshmem_cfg_write(device, address / 4, mask, value);
+
cap = pci_find_capability(device, address);
if (!cap || !(cap->flags & JAILHOUSE_PCICAPS_WRITE))
return PCI_ACCESS_REJECT;
- value <<= bias_shift;
-
cap_offs = address - cap->start;
if (cap->id == PCI_CAP_MSI &&
(cap_offs < 10 || (device->info->msi_64bits && cap_offs < 14))) {
*/
int pci_init(void)
{
- unsigned int mmcfg_size;
int err;
- err = pci_cell_init(&root_cell);
- if (err)
- return err;
-
mmcfg_start = system_config->platform_info.x86.mmconfig_base;
- if (mmcfg_start == 0)
- return 0;
+ if (mmcfg_start != 0) {
+ end_bus = system_config->platform_info.x86.mmconfig_end_bus;
+ mmcfg_size = (end_bus + 1) * 256 * 4096;
- end_bus = system_config->platform_info.x86.mmconfig_end_bus;
- mmcfg_size = (end_bus + 1) * 256 * 4096;
- mmcfg_end = mmcfg_start + mmcfg_size - 4;
+ pci_space = page_alloc(&remap_pool, mmcfg_size / PAGE_SIZE);
+ if (!pci_space)
+ return trace_error(-ENOMEM);
- pci_space = page_alloc(&remap_pool, mmcfg_size / PAGE_SIZE);
- if (!pci_space)
- return trace_error(-ENOMEM);
+ err = paging_create(&hv_paging_structs, mmcfg_start,
+ mmcfg_size, (unsigned long)pci_space,
+ PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE,
+ PAGING_NON_COHERENT);
+ if (err)
+ return err;
+ }
- return paging_create(&hv_paging_structs, mmcfg_start, mmcfg_size,
- (unsigned long)pci_space,
- PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE,
- PAGING_NON_COHERENT);
+ return pci_cell_init(&root_cell);
}
-static int pci_msix_access_handler(const struct cell *cell, bool is_write,
- u64 addr, u32 *value)
+static enum mmio_result pci_msix_access_handler(void *arg,
+ struct mmio_access *mmio)
{
- unsigned int dword = (addr % sizeof(union pci_msix_vector)) >> 2;
- struct pci_device *device = cell->msix_device_list;
+ unsigned int dword =
+ (mmio->address % sizeof(union pci_msix_vector)) >> 2;
+ struct pci_device *device = arg;
unsigned int index;
- u64 offs;
-
- while (device) {
- if (addr >= device->info->msix_address &&
- addr < device->info->msix_address +
- device->info->msix_region_size)
- goto found;
- device = device->next_msix_device;
- }
- return 0;
-found:
/* access must be DWORD-aligned */
- if (addr & 0x3)
+ if (mmio->address & 0x3)
goto invalid_access;
- offs = addr - device->info->msix_address;
- index = offs / sizeof(union pci_msix_vector);
+ index = mmio->address / sizeof(union pci_msix_vector);
- if (is_write) {
+ if (mmio->is_write) {
/*
* The PBA may share a page with the MSI-X table. Writing to
* PBA entries is undefined. We declare it as invalid.
if (index >= device->info->num_msix_vectors)
goto invalid_access;
- device->msix_vectors[index].raw[dword] = *value;
+ device->msix_vectors[index].raw[dword] = mmio->value;
if (arch_pci_update_msix_vector(device, index) < 0)
goto invalid_access;
if (dword == MSIX_VECTOR_CTRL_DWORD)
mmio_write32(&device->msix_table[index].raw[dword],
- *value);
+ mmio->value);
} else {
if (index >= device->info->num_msix_vectors ||
dword == MSIX_VECTOR_CTRL_DWORD)
- *value =
- mmio_read32(((void *)device->msix_table) + offs);
+ mmio->value = mmio_read32(((void *)device->msix_table) +
+ mmio->address);
else
- *value = device->msix_vectors[index].raw[dword];
+ mmio->value = device->msix_vectors[index].raw[dword];
}
- return 1;
+ return MMIO_HANDLED;
invalid_access:
- panic_printk("FATAL: Invalid PCI MSIX BAR write, device "
+ panic_printk("FATAL: Invalid PCI MSI-X table/PBA access, device "
"%02x:%02x.%x\n", PCI_BDF_PARAMS(device->info->bdf));
- return -1;
+ return MMIO_ERROR;
}
-/**
- * Handler for MMIO-accesses to PCI config space.
- * @param cell Request issuing cell.
- * @param is_write True if write access.
- * @param addr Address accessed.
- * @param value Pointer to value for reading/writing.
- *
- * @return 1 if handled successfully, 0 if unhandled, -1 on access error.
- */
-int pci_mmio_access_handler(const struct cell *cell, bool is_write,
- u64 addr, u32 *value)
+static enum mmio_result pci_mmconfig_access_handler(void *arg,
+ struct mmio_access *mmio)
{
- u32 mmcfg_offset, reg_addr;
+ u32 reg_addr = mmio->address & 0xfff;
struct pci_device *device;
- enum pci_access access;
- int ret;
-
- if (!pci_space || addr < mmcfg_start || addr > mmcfg_end) {
- ret = pci_msix_access_handler(cell, is_write, addr, value);
- if (ret == 0)
- ret = ivshmem_mmio_access_handler(cell, is_write, addr,
- value);
- return ret;
- }
+ enum pci_access result;
+ u32 val;
- mmcfg_offset = addr - mmcfg_start;
- reg_addr = mmcfg_offset & 0xfff;
/* access must be DWORD-aligned */
if (reg_addr & 0x3)
goto invalid_access;
- device = pci_get_assigned_device(cell, mmcfg_offset >> 12);
+ device = pci_get_assigned_device(this_cell(), mmio->address >> 12);
- if (is_write) {
- access = pci_cfg_write_moderate(device, reg_addr, 4, *value);
- if (access == PCI_ACCESS_REJECT)
+ if (mmio->is_write) {
+ result = pci_cfg_write_moderate(device, reg_addr, 4,
+ mmio->value);
+ if (result == PCI_ACCESS_REJECT)
goto invalid_access;
- if (access == PCI_ACCESS_PERFORM)
- mmio_write32(pci_space + mmcfg_offset, *value);
+ if (result == PCI_ACCESS_PERFORM)
+ mmio_write32(pci_space + mmio->address, mmio->value);
} else {
- access = pci_cfg_read_moderate(device, reg_addr, 4, value);
- if (access == PCI_ACCESS_PERFORM)
- *value = mmio_read32(pci_space + mmcfg_offset);
+ result = pci_cfg_read_moderate(device, reg_addr, 4, &val);
+ if (result == PCI_ACCESS_PERFORM)
+ mmio->value = mmio_read32(pci_space + mmio->address);
+ else
+ mmio->value = val;
}
- return 1;
+ return MMIO_HANDLED;
invalid_access:
panic_printk("FATAL: Invalid PCI MMCONFIG write, device %02x:%02x.%x, "
- "reg: %\n", PCI_BDF_PARAMS(mmcfg_offset >> 12), reg_addr);
- return -1;
+ "reg: %x\n", PCI_BDF_PARAMS(mmio->address >> 12),
+ reg_addr);
+ return MMIO_ERROR;
}
}
}
-static int pci_add_virtual_device(struct cell *cell, struct pci_device *device)
+static int pci_add_physical_device(struct cell *cell, struct pci_device *device)
{
- device->cell = cell;
- device->next_virtual_device = cell->virtual_device_list;
- cell->virtual_device_list = device;
- return arch_pci_add_device(cell, device);
-}
-
-static int pci_add_device(struct cell *cell, struct pci_device *device)
-{
- unsigned int size = device->info->msix_region_size;
+ unsigned int n, pages, size = device->info->msix_region_size;
int err;
printk("Adding PCI device %02x:%02x.%x to cell \"%s\"\n",
PCI_BDF_PARAMS(device->info->bdf), cell->config->name);
- err = arch_pci_add_device(cell, device);
+ for (n = 0; n < PCI_NUM_BARS; n ++)
+ device->bar[n] = pci_read_config(device->info->bdf,
+ PCI_CFG_BAR + n * 4, 4);
+
+ err = arch_pci_add_physical_device(cell, device);
if (!err && device->info->msix_address) {
device->msix_table = page_alloc(&remap_pool, size / PAGE_SIZE);
if (err)
goto error_page_free;
- device->next_msix_device = cell->msix_device_list;
- cell->msix_device_list = device;
+ if (device->info->num_msix_vectors > PCI_EMBEDDED_MSIX_VECTS) {
+ pages = PAGES(sizeof(union pci_msix_vector) *
+ device->info->num_msix_vectors);
+ device->msix_vectors = page_alloc(&mem_pool, pages);
+ if (!device->msix_vectors) {
+ err = -ENOMEM;
+ goto error_unmap_table;
+ }
+ }
+
+ mmio_region_register(cell, device->info->msix_address, size,
+ pci_msix_access_handler, device);
}
return err;
+error_unmap_table:
+ /* cannot fail, destruction of same size as construction */
+ paging_destroy(&hv_paging_structs, (unsigned long)device->msix_table,
+ size, PAGING_NON_COHERENT);
error_page_free:
page_free(&remap_pool, device->msix_table, size / PAGE_SIZE);
error_remove_dev:
- arch_pci_remove_device(device);
+ arch_pci_remove_physical_device(device);
return err;
}
-static void pci_remove_virtual_device(struct pci_device *device)
-{
- struct pci_device *prev = device->cell->virtual_device_list;
-
- arch_pci_remove_device(device);
- if (prev == device) {
- device->cell->virtual_device_list = device->next_virtual_device;
- } else {
- while (prev->next_virtual_device != device)
- prev = prev->next_virtual_device;
- prev->next_virtual_device = device->next_virtual_device;
- }
-}
-
-static void pci_remove_device(struct pci_device *device)
+static void pci_remove_physical_device(struct pci_device *device)
{
unsigned int size = device->info->msix_region_size;
- struct pci_device *prev_msix_device;
printk("Removing PCI device %02x:%02x.%x from cell \"%s\"\n",
PCI_BDF_PARAMS(device->info->bdf), device->cell->config->name);
- arch_pci_remove_device(device);
+ arch_pci_remove_physical_device(device);
pci_write_config(device->info->bdf, PCI_CFG_COMMAND,
PCI_CMD_INTX_OFF, 2);
size, PAGING_NON_COHERENT);
page_free(&remap_pool, device->msix_table, size / PAGE_SIZE);
- prev_msix_device = device->cell->msix_device_list;
- if (prev_msix_device == device) {
- device->cell->msix_device_list = device->next_msix_device;
- } else {
- while (prev_msix_device->next_msix_device != device)
- prev_msix_device = prev_msix_device->next_msix_device;
- prev_msix_device->next_msix_device = device->next_msix_device;
- }
+ if (device->msix_vectors != device->msix_vector_array)
+ page_free(&mem_pool, device->msix_vectors,
+ PAGES(sizeof(union pci_msix_vector) *
+ device->info->num_msix_vectors));
+
+ mmio_region_unregister(device->cell, device->info->msix_address);
}
/**
unsigned int ndev, ncap;
int err;
+ if (pci_space)
+ mmio_region_register(cell, mmcfg_start, mmcfg_size,
+ pci_mmconfig_access_handler, NULL);
+
+ if (cell->config->num_pci_devices == 0)
+ return 0;
+
cell->pci_devices = page_alloc(&mem_pool, devlist_pages);
if (!cell->pci_devices)
return -ENOMEM;
* handy pointers. The cell pointer also encodes active ownership.
*/
for (ndev = 0; ndev < cell->config->num_pci_devices; ndev++) {
- if (dev_infos[ndev].num_msix_vectors > PCI_MAX_MSIX_VECTORS) {
- err = trace_error(-ERANGE);
- goto error;
- }
-
device = &cell->pci_devices[ndev];
device->info = &dev_infos[ndev];
+ device->msix_vectors = device->msix_vector_array;
if (device->info->type == JAILHOUSE_PCI_TYPE_IVSHMEM) {
err = pci_ivshmem_init(cell, device);
if (err)
goto error;
- err = pci_add_virtual_device(cell, device);
- if (err)
- goto error;
+
+ device->cell = cell;
+
continue;
}
root_device = pci_get_assigned_device(&root_cell,
dev_infos[ndev].bdf);
if (root_device) {
- pci_remove_device(root_device);
+ pci_remove_physical_device(root_device);
root_device->cell = NULL;
}
- err = pci_add_device(cell, device);
+ err = pci_add_physical_device(cell, device);
if (err)
goto error;
for_each_configured_pci_device(root_device, &root_cell)
if (root_device->info->domain == device->info->domain &&
root_device->info->bdf == device->info->bdf) {
- if (pci_add_device(&root_cell, root_device) < 0)
+ if (pci_add_physical_device(&root_cell,
+ root_device) < 0)
printk("WARNING: Failed to re-assign PCI "
"device to root cell\n");
else
if (device->cell) {
if (device->info->type == JAILHOUSE_PCI_TYPE_IVSHMEM) {
pci_ivshmem_exit(device);
- pci_remove_virtual_device(device);
} else {
- pci_remove_device(device);
+ pci_remove_physical_device(device);
pci_return_device_to_root_cell(device);
}
}