};
static void *pci_space;
-static u64 mmcfg_start, mmcfg_end;
+static u64 mmcfg_start, mmcfg_size;
static u8 end_bus;
+/*
+ * Count the MMIO regions a cell will register for PCI: one covering the
+ * platform MMCONFIG window (if the platform provides one), plus the fixed
+ * set of regions per ivshmem device, plus one per device that exposes an
+ * MSI-X table (msix_address != 0).
+ */
+unsigned int pci_mmio_count_regions(struct cell *cell)
+{
+ const struct jailhouse_pci_device *dev_infos =
+ jailhouse_cell_pci_devices(cell->config);
+ unsigned int n, regions = 0;
+
+ if (system_config->platform_info.x86.mmconfig_base)
+ regions++; /* single region for the whole MMCONFIG window */
+
+ for (n = 0; n < cell->config->num_pci_devices; n++)
+ if (dev_infos[n].type == JAILHOUSE_PCI_TYPE_IVSHMEM)
+ regions += PCI_IVSHMEM_NUM_MMIO_REGIONS;
+ else if (dev_infos[n].msix_address)
+ regions++; /* MSI-X table/PBA region */
+
+ return regions;
+}
+
static void *pci_get_device_mmcfg_base(u16 bdf)
{
return pci_space + ((unsigned long)bdf << 12);
return PCI_ACCESS_DONE;
}
- if (device->info->type == JAILHOUSE_PCI_TYPE_IVSHMEM)
- return pci_ivshmem_cfg_read(device, address, size, value);
-
- /* Emulate BARs for physical devices */
- if (device->info->type == JAILHOUSE_PCI_TYPE_DEVICE) {
+ /* Emulate BARs for physical and virtual devices */
+ if (device->info->type != JAILHOUSE_PCI_TYPE_BRIDGE) {
/* Emulate BAR access, always returning the shadow value. */
if (address >= PCI_CFG_BAR && address <= PCI_CFG_BAR_END) {
bar_no = (address - PCI_CFG_BAR) / 4;
}
}
+ if (device->info->type == JAILHOUSE_PCI_TYPE_IVSHMEM)
+ return pci_ivshmem_cfg_read(device, address, value);
+
if (address < PCI_CONFIG_HEADER_SIZE)
return PCI_ACCESS_PERFORM;
value <<= bias_shift;
- if (device->info->type == JAILHOUSE_PCI_TYPE_IVSHMEM)
- return pci_ivshmem_cfg_write(device, address / 4, mask, value);
-
- /* Emulate BARs for physical devices */
- if (device->info->type == JAILHOUSE_PCI_TYPE_DEVICE &&
+ /* Emulate BARs for physical and virtual devices */
+ if (device->info->type != JAILHOUSE_PCI_TYPE_BRIDGE &&
address >= PCI_CFG_BAR && address <= PCI_CFG_BAR_END) {
bar_no = (address - PCI_CFG_BAR) / 4;
mask &= device->info->bar_mask[bar_no];
if (address < PCI_CONFIG_HEADER_SIZE) {
if (device->info->type == JAILHOUSE_PCI_TYPE_BRIDGE)
cfg_control = bridge_write[address / 4];
- else /* physical device */
+ else /* physical or virtual device */
cfg_control = endpoint_write[address / 4];
if ((cfg_control.mask & mask) != mask)
switch (cfg_control.type) {
case PCI_CONFIG_ALLOW:
+ if (device->info->type == JAILHOUSE_PCI_TYPE_IVSHMEM)
+ return pci_ivshmem_cfg_write(device,
+ address / 4, mask, value);
return PCI_ACCESS_PERFORM;
case PCI_CONFIG_RDONLY:
return PCI_ACCESS_DONE;
}
}
+ if (device->info->type == JAILHOUSE_PCI_TYPE_IVSHMEM)
+ return pci_ivshmem_cfg_write(device, address / 4, mask, value);
+
cap = pci_find_capability(device, address);
if (!cap || !(cap->flags & JAILHOUSE_PCICAPS_WRITE))
return PCI_ACCESS_REJECT;
*/
int pci_init(void)
{
- unsigned int mmcfg_size;
 int err;
- err = pci_cell_init(&root_cell);
- if (err)
- return err;
-
 mmcfg_start = system_config->platform_info.x86.mmconfig_base;
- if (mmcfg_start == 0)
- return 0;
+ if (mmcfg_start != 0) {
+ end_bus = system_config->platform_info.x86.mmconfig_end_bus;
+ mmcfg_size = (end_bus + 1) * 256 * 4096; /* 256 devfns x 4K config space per bus */
- end_bus = system_config->platform_info.x86.mmconfig_end_bus;
- mmcfg_size = (end_bus + 1) * 256 * 4096;
- mmcfg_end = mmcfg_start + mmcfg_size - 4;
+ pci_space = page_alloc(&remap_pool, mmcfg_size / PAGE_SIZE); /* hypervisor-private remap of MMCONFIG */
+ if (!pci_space)
+ return trace_error(-ENOMEM);
- pci_space = page_alloc(&remap_pool, mmcfg_size / PAGE_SIZE);
- if (!pci_space)
- return trace_error(-ENOMEM);
+ err = paging_create(&hv_paging_structs, mmcfg_start,
+ mmcfg_size, (unsigned long)pci_space,
+ PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE,
+ PAGING_NON_COHERENT);
+ if (err)
+ return err;
+ }
- return paging_create(&hv_paging_structs, mmcfg_start, mmcfg_size,
- (unsigned long)pci_space,
- PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE,
- PAGING_NON_COHERENT);
+ return pci_cell_init(&root_cell); /* after mapping, so cell init can register the MMCONFIG region */
}
-static int pci_msix_access_handler(const struct cell *cell, bool is_write,
- u64 addr, u32 *value)
+/*
+ * Per-device MMIO handler for an MSI-X table/PBA region. arg is the owning
+ * struct pci_device (registered via mmio_region_register), and
+ * mmio->address is the offset into that region, so no device lookup is
+ * needed any more.
+ */
+static enum mmio_result pci_msix_access_handler(void *arg,
+ struct mmio_access *mmio)
{
- unsigned int dword = (addr % sizeof(union pci_msix_vector)) >> 2;
- struct pci_device *device = cell->msix_device_list;
+ unsigned int dword =
+ (mmio->address % sizeof(union pci_msix_vector)) >> 2; /* DWORD within vector entry */
+ struct pci_device *device = arg;
 unsigned int index;
- u64 offs;
-
- while (device) {
- if (addr >= device->info->msix_address &&
- addr < device->info->msix_address +
- device->info->msix_region_size)
- goto found;
- device = device->next_msix_device;
- }
- return 0;
-found:
 /* access must be DWORD-aligned */
- if (addr & 0x3)
+ if (mmio->address & 0x3)
 goto invalid_access;
- offs = addr - device->info->msix_address;
- index = offs / sizeof(union pci_msix_vector);
+ index = mmio->address / sizeof(union pci_msix_vector); /* offset is region-relative */
- if (is_write) {
+ if (mmio->is_write) {
 /*
 * The PBA may share a page with the MSI-X table. Writing to
 * PBA entries is undefined. We declare it as invalid.
 if (index >= device->info->num_msix_vectors)
 goto invalid_access;
- device->msix_vectors[index].raw[dword] = *value;
+ device->msix_vectors[index].raw[dword] = mmio->value; /* update shadow first */
 if (arch_pci_update_msix_vector(device, index) < 0)
 goto invalid_access;
 if (dword == MSIX_VECTOR_CTRL_DWORD)
 mmio_write32(&device->msix_table[index].raw[dword],
- *value);
+ mmio->value);
 } else {
 if (index >= device->info->num_msix_vectors ||
 dword == MSIX_VECTOR_CTRL_DWORD)
- *value =
- mmio_read32(((void *)device->msix_table) + offs);
+ mmio->value = mmio_read32(((void *)device->msix_table) +
+ mmio->address);
 else
- *value = device->msix_vectors[index].raw[dword];
+ mmio->value = device->msix_vectors[index].raw[dword]; /* read back shadow */
 }
- return 1;
+ return MMIO_HANDLED;
invalid_access:
 panic_printk("FATAL: Invalid PCI MSI-X table/PBA access, device "
 "%02x:%02x.%x\n", PCI_BDF_PARAMS(device->info->bdf));
- return -1;
+ return MMIO_ERROR;
}
-/**
- * Handler for MMIO-accesses to PCI config space.
- * @param cell Request issuing cell.
- * @param is_write True if write access.
- * @param addr Address accessed.
- * @param value Pointer to value for reading/writing.
- *
- * @return 1 if handled successfully, 0 if unhandled, -1 on access error.
- */
-int pci_mmio_access_handler(const struct cell *cell, bool is_write,
- u64 addr, u32 *value)
+/*
+ * MMIO handler for the MMCONFIG window registered per cell. mmio->address
+ * is the offset from mmcfg_start, so bits 12+ encode the BDF and the low
+ * 12 bits the config-space register. Accesses are moderated through
+ * pci_cfg_read/write_moderate before touching real config space.
+ */
+static enum mmio_result pci_mmconfig_access_handler(void *arg,
+ struct mmio_access *mmio)
{
- u32 mmcfg_offset, reg_addr;
+ u32 reg_addr = mmio->address & 0xfff; /* register within 4K config space */
 struct pci_device *device;
- enum pci_access access;
- int ret;
-
- if (!pci_space || addr < mmcfg_start || addr > mmcfg_end) {
- ret = pci_msix_access_handler(cell, is_write, addr, value);
- if (ret == 0)
- ret = ivshmem_mmio_access_handler(cell, is_write, addr,
- value);
- return ret;
- }
+ enum pci_access result;
+ u32 val;
- mmcfg_offset = addr - mmcfg_start;
- reg_addr = mmcfg_offset & 0xfff;
 /* access must be DWORD-aligned */
 if (reg_addr & 0x3)
 goto invalid_access;
- device = pci_get_assigned_device(cell, mmcfg_offset >> 12);
+ device = pci_get_assigned_device(this_cell(), mmio->address >> 12); /* BDF from offset */
- if (is_write) {
- access = pci_cfg_write_moderate(device, reg_addr, 4, *value);
- if (access == PCI_ACCESS_REJECT)
+ if (mmio->is_write) {
+ result = pci_cfg_write_moderate(device, reg_addr, 4,
+ mmio->value);
+ if (result == PCI_ACCESS_REJECT)
 goto invalid_access;
- if (access == PCI_ACCESS_PERFORM)
- mmio_write32(pci_space + mmcfg_offset, *value);
+ if (result == PCI_ACCESS_PERFORM)
+ mmio_write32(pci_space + mmio->address, mmio->value); /* pass through to hardware */
 } else {
- access = pci_cfg_read_moderate(device, reg_addr, 4, value);
- if (access == PCI_ACCESS_PERFORM)
- *value = mmio_read32(pci_space + mmcfg_offset);
+ result = pci_cfg_read_moderate(device, reg_addr, 4, &val);
+ if (result == PCI_ACCESS_PERFORM)
+ mmio->value = mmio_read32(pci_space + mmio->address);
+ else
+ mmio->value = val; /* emulated/moderated value */
 }
- return 1;
+ return MMIO_HANDLED;
invalid_access:
 panic_printk("FATAL: Invalid PCI MMCONFIG write, device %02x:%02x.%x, "
- "reg: %\n", PCI_BDF_PARAMS(mmcfg_offset >> 12), reg_addr);
- return -1;
+ "reg: %x\n", PCI_BDF_PARAMS(mmio->address >> 12),
+ reg_addr); /* NOTE(review): also reached for misaligned reads despite "write" wording */
+ return MMIO_ERROR;
}
}
}
-static int pci_add_virtual_device(struct cell *cell, struct pci_device *device)
-{
- device->cell = cell;
- device->next_virtual_device = cell->virtual_device_list;
- cell->virtual_device_list = device;
- return 0;
-}
-
static int pci_add_physical_device(struct cell *cell, struct pci_device *device)
{
- unsigned int n, size = device->info->msix_region_size;
+ unsigned int n, pages, size = device->info->msix_region_size;
int err;
printk("Adding PCI device %02x:%02x.%x to cell \"%s\"\n",
if (err)
goto error_page_free;
- device->next_msix_device = cell->msix_device_list;
- cell->msix_device_list = device;
+ if (device->info->num_msix_vectors > PCI_EMBEDDED_MSIX_VECTS) {
+ pages = PAGES(sizeof(union pci_msix_vector) *
+ device->info->num_msix_vectors);
+ device->msix_vectors = page_alloc(&mem_pool, pages);
+ if (!device->msix_vectors) {
+ err = -ENOMEM;
+ goto error_unmap_table;
+ }
+ }
+
+ mmio_region_register(cell, device->info->msix_address, size,
+ pci_msix_access_handler, device);
}
return err;
+error_unmap_table:
+ /* cannot fail, destruction of same size as construction */
+ paging_destroy(&hv_paging_structs, (unsigned long)device->msix_table,
+ size, PAGING_NON_COHERENT);
error_page_free:
page_free(&remap_pool, device->msix_table, size / PAGE_SIZE);
error_remove_dev:
return err;
}
-static void pci_remove_virtual_device(struct pci_device *device)
-{
- struct pci_device *prev = device->cell->virtual_device_list;
-
- if (prev == device) {
- device->cell->virtual_device_list = device->next_virtual_device;
- } else {
- while (prev->next_virtual_device != device)
- prev = prev->next_virtual_device;
- prev->next_virtual_device = device->next_virtual_device;
- }
-}
-
static void pci_remove_physical_device(struct pci_device *device)
{
unsigned int size = device->info->msix_region_size;
- struct pci_device *prev_msix_device;
printk("Removing PCI device %02x:%02x.%x from cell \"%s\"\n",
PCI_BDF_PARAMS(device->info->bdf), device->cell->config->name);
size, PAGING_NON_COHERENT);
page_free(&remap_pool, device->msix_table, size / PAGE_SIZE);
- prev_msix_device = device->cell->msix_device_list;
- if (prev_msix_device == device) {
- device->cell->msix_device_list = device->next_msix_device;
- } else {
- while (prev_msix_device->next_msix_device != device)
- prev_msix_device = prev_msix_device->next_msix_device;
- prev_msix_device->next_msix_device = device->next_msix_device;
- }
+ if (device->msix_vectors != device->msix_vector_array)
+ page_free(&mem_pool, device->msix_vectors,
+ PAGES(sizeof(union pci_msix_vector) *
+ device->info->num_msix_vectors));
+
+ mmio_region_unregister(device->cell, device->info->msix_address);
}
/**
unsigned int ndev, ncap;
int err;
+ if (pci_space)
+ mmio_region_register(cell, mmcfg_start, mmcfg_size,
+ pci_mmconfig_access_handler, NULL);
+
+ if (cell->config->num_pci_devices == 0)
+ return 0;
+
cell->pci_devices = page_alloc(&mem_pool, devlist_pages);
if (!cell->pci_devices)
return -ENOMEM;
* handy pointers. The cell pointer also encodes active ownership.
*/
for (ndev = 0; ndev < cell->config->num_pci_devices; ndev++) {
- if (dev_infos[ndev].num_msix_vectors > PCI_MAX_MSIX_VECTORS) {
- err = trace_error(-ERANGE);
- goto error;
- }
-
device = &cell->pci_devices[ndev];
device->info = &dev_infos[ndev];
+ device->msix_vectors = device->msix_vector_array;
if (device->info->type == JAILHOUSE_PCI_TYPE_IVSHMEM) {
err = pci_ivshmem_init(cell, device);
if (err)
goto error;
- err = pci_add_virtual_device(cell, device);
- if (err)
- goto error;
+
+ device->cell = cell;
+
continue;
}
if (device->cell) {
if (device->info->type == JAILHOUSE_PCI_TYPE_IVSHMEM) {
pci_ivshmem_exit(device);
- pci_remove_virtual_device(device);
} else {
pci_remove_physical_device(device);
pci_return_device_to_root_cell(device);