/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2014, 2015
 *
 * Authors:
 *  Ivan Kolchin <ivan.kolchin@siemens.com>
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include <jailhouse/control.h>
#include <jailhouse/mmio.h>
#include <jailhouse/pci.h>
#include <jailhouse/printk.h>
#include <jailhouse/utils.h>
/* Index of the vector-control dword inside an MSI-X table entry. */
#define MSIX_VECTOR_CTRL_DWORD		3
/* Iterate over all device states of a cell, configured or not owned. */
#define for_each_configured_pci_device(dev, cell)			\
	for ((dev) = (cell)->pci_devices;				\
	     (dev) - (cell)->pci_devices < (cell)->config->num_pci_devices; \
	     (dev)++)
/* Iterate over all capabilities of a device, tracked by (counter). */
#define for_each_pci_cap(cap, dev, counter)				\
	for ((cap) = jailhouse_cell_pci_caps((dev)->cell->config) +	\
		     (dev)->info->caps_start, (counter) = 0;		\
	     (counter) < (dev)->info->num_caps;				\
	     (cap)++, (counter)++)
33 /* entry for PCI config space access control */
34 struct pci_cfg_control {
39 } type; /* Access type */
40 u32 mask; /* Bit set: access type applies; bit cleared: deny access */
43 /* --- Access control for writing to PCI config space registers --- */
44 /* Type 1: Endpoints */
45 static const struct pci_cfg_control endpoint_write[PCI_CONFIG_HEADER_SIZE] = {
46 [0x04/4] = {PCI_CONFIG_ALLOW, 0xffffffff}, /* Command, Status */
47 [0x0c/4] = {PCI_CONFIG_ALLOW, 0xff00ffff}, /* BIST, Lat., Cacheline */
48 [0x30/4] = {PCI_CONFIG_RDONLY, 0xffffffff}, /* ROM BAR */
49 [0x3c/4] = {PCI_CONFIG_ALLOW, 0x000000ff}, /* Int Line */
53 * Note: Ignore limit/base reprogramming attempts because the root cell will
54 * perform them on bus rescans. */
55 static const struct pci_cfg_control bridge_write[PCI_CONFIG_HEADER_SIZE] = {
56 [0x04/4] = {PCI_CONFIG_ALLOW, 0xffffffff}, /* Command, Status */
57 [0x0c/4] = {PCI_CONFIG_ALLOW, 0xff00ffff}, /* BIST, Lat., Cacheline */
58 [0x1c/4] = {PCI_CONFIG_RDONLY, 0x0000ffff}, /* I/O Limit & Base */
59 [0x20/4 ... /* Memory Limit/Base, Prefetch Memory Limit/Base, */
60 0x30/4] = {PCI_CONFIG_RDONLY, 0xffffffff}, /* I/O Limit & Base */
61 [0x3c/4] = {PCI_CONFIG_ALLOW, 0xffff00ff}, /* Int Line, Bridge Ctrl */
64 static void *pci_space;
65 static u64 mmcfg_start, mmcfg_end;
68 unsigned int pci_mmio_count_regions(struct cell *cell)
70 const struct jailhouse_pci_device *dev_infos =
71 jailhouse_cell_pci_devices(cell->config);
72 unsigned int n, regions = 0;
74 for (n = 0; n < cell->config->num_pci_devices; n++)
75 if (dev_infos[n].type == JAILHOUSE_PCI_TYPE_IVSHMEM)
76 regions += PCI_IVSHMEM_NUM_MMIO_REGIONS;
81 static void *pci_get_device_mmcfg_base(u16 bdf)
83 return pci_space + ((unsigned long)bdf << 12);
87 * Read from PCI config space.
88 * @param bdf 16-bit bus/device/function ID of target.
89 * @param address Config space access address.
90 * @param size Access size (1, 2 or 4 bytes).
94 * @see pci_write_config
96 u32 pci_read_config(u16 bdf, u16 address, unsigned int size)
98 void *mmcfg_addr = pci_get_device_mmcfg_base(bdf) + address;
100 if (!pci_space || PCI_BUS(bdf) > end_bus)
101 return arch_pci_read_config(bdf, address, size);
104 return mmio_read8(mmcfg_addr);
106 return mmio_read16(mmcfg_addr);
108 return mmio_read32(mmcfg_addr);
112 * Write to PCI config space.
113 * @param bdf 16-bit bus/device/function ID of target.
114 * @param address Config space access address.
115 * @param value Value to be written.
116 * @param size Access size (1, 2 or 4 bytes).
118 * @see pci_read_config
120 void pci_write_config(u16 bdf, u16 address, u32 value, unsigned int size)
122 void *mmcfg_addr = pci_get_device_mmcfg_base(bdf) + address;
124 if (!pci_space || PCI_BUS(bdf) > end_bus)
125 return arch_pci_write_config(bdf, address, value, size);
128 mmio_write8(mmcfg_addr, value);
130 mmio_write16(mmcfg_addr, value);
132 mmio_write32(mmcfg_addr, value);
136 * Look up device owned by a cell.
137 * @param[in] cell Owning cell.
138 * @param bdf 16-bit bus/device/function ID.
140 * @return Pointer to owned PCI device or NULL.
142 struct pci_device *pci_get_assigned_device(const struct cell *cell, u16 bdf)
144 const struct jailhouse_pci_device *dev_info =
145 jailhouse_cell_pci_devices(cell->config);
148 /* We iterate over the static device information to increase cache
150 for (n = 0; n < cell->config->num_pci_devices; n++)
151 if (dev_info[n].bdf == bdf)
152 return cell->pci_devices[n].cell ?
153 &cell->pci_devices[n] : NULL;
159 * Look up capability at given config space address.
160 * @param device The device to be accessed.
161 * @param address Config space access address.
163 * @return Corresponding capability structure or NULL if none found.
167 static const struct jailhouse_pci_capability *
168 pci_find_capability(struct pci_device *device, u16 address)
170 const struct jailhouse_pci_capability *cap =
171 jailhouse_cell_pci_caps(device->cell->config) +
172 device->info->caps_start;
175 for (n = 0; n < device->info->num_caps; n++, cap++)
176 if (cap->start <= address && cap->start + cap->len > address)
183 * Moderate config space read access.
184 * @param device The device to be accessed. If NULL, access will be
185 * emulated, returning a value of -1.
186 * @param address Config space address.
187 * @param size Access size (1, 2 or 4 bytes).
188 * @param value Pointer to buffer to receive the emulated value if
189 * PCI_ACCESS_DONE is returned.
191 * @return PCI_ACCESS_PERFORM or PCI_ACCESS_DONE.
193 * @see pci_cfg_write_moderate
195 enum pci_access pci_cfg_read_moderate(struct pci_device *device, u16 address,
196 unsigned int size, u32 *value)
198 const struct jailhouse_pci_capability *cap;
199 unsigned int bar_no, cap_offs;
203 return PCI_ACCESS_DONE;
206 /* Emulate BARs for physical and virtual devices */
207 if (device->info->type != JAILHOUSE_PCI_TYPE_BRIDGE) {
208 /* Emulate BAR access, always returning the shadow value. */
209 if (address >= PCI_CFG_BAR && address <= PCI_CFG_BAR_END) {
210 bar_no = (address - PCI_CFG_BAR) / 4;
211 *value = device->bar[bar_no] >> ((address % 4) * 8);
212 return PCI_ACCESS_DONE;
215 /* We do not expose ROMs. */
216 if (address >= PCI_CFG_ROMBAR && address < PCI_CFG_CAPS) {
218 return PCI_ACCESS_DONE;
222 if (device->info->type == JAILHOUSE_PCI_TYPE_IVSHMEM)
223 return pci_ivshmem_cfg_read(device, address, value);
225 if (address < PCI_CONFIG_HEADER_SIZE)
226 return PCI_ACCESS_PERFORM;
228 cap = pci_find_capability(device, address);
230 return PCI_ACCESS_PERFORM;
232 cap_offs = address - cap->start;
233 if (cap->id == PCI_CAP_MSI && cap_offs >= 4 &&
234 (cap_offs < 10 || (device->info->msi_64bits && cap_offs < 14))) {
235 *value = device->msi_registers.raw[cap_offs / 4] >>
236 ((cap_offs % 4) * 8);
237 return PCI_ACCESS_DONE;
240 return PCI_ACCESS_PERFORM;
243 static int pci_update_msix(struct pci_device *device,
244 const struct jailhouse_pci_capability *cap)
249 for (n = 0; n < device->info->num_msix_vectors; n++) {
250 result = arch_pci_update_msix_vector(device, n);
258 * Moderate config space write access.
259 * @param device The device to be accessed. If NULL, access will be
261 * @param address Config space address.
262 * @param size Access size (1, 2 or 4 bytes).
263 * @param value Value to be written.
265 * @return PCI_ACCESS_REJECT, PCI_ACCESS_PERFORM or PCI_ACCESS_DONE.
267 * @see pci_cfg_read_moderate
269 enum pci_access pci_cfg_write_moderate(struct pci_device *device, u16 address,
270 unsigned int size, u32 value)
272 const struct jailhouse_pci_capability *cap;
273 /* initialize list to work around wrong compiler warning */
274 unsigned int bias_shift = (address % 4) * 8;
275 u32 mask = BYTE_MASK(size) << bias_shift;
276 struct pci_cfg_control cfg_control;
277 unsigned int bar_no, cap_offs;
280 return PCI_ACCESS_REJECT;
282 value <<= bias_shift;
284 /* Emulate BARs for physical and virtual devices */
285 if (device->info->type != JAILHOUSE_PCI_TYPE_BRIDGE &&
286 address >= PCI_CFG_BAR && address <= PCI_CFG_BAR_END) {
287 bar_no = (address - PCI_CFG_BAR) / 4;
288 mask &= device->info->bar_mask[bar_no];
289 device->bar[bar_no] &= ~mask;
290 device->bar[bar_no] |= value & mask;
291 return PCI_ACCESS_DONE;
294 if (address < PCI_CONFIG_HEADER_SIZE) {
295 if (device->info->type == JAILHOUSE_PCI_TYPE_BRIDGE)
296 cfg_control = bridge_write[address / 4];
297 else /* physical or virtual device */
298 cfg_control = endpoint_write[address / 4];
300 if ((cfg_control.mask & mask) != mask)
301 return PCI_ACCESS_REJECT;
303 switch (cfg_control.type) {
304 case PCI_CONFIG_ALLOW:
305 if (device->info->type == JAILHOUSE_PCI_TYPE_IVSHMEM)
306 return pci_ivshmem_cfg_write(device,
307 address / 4, mask, value);
308 return PCI_ACCESS_PERFORM;
309 case PCI_CONFIG_RDONLY:
310 return PCI_ACCESS_DONE;
312 return PCI_ACCESS_REJECT;
316 if (device->info->type == JAILHOUSE_PCI_TYPE_IVSHMEM)
317 return pci_ivshmem_cfg_write(device, address / 4, mask, value);
319 cap = pci_find_capability(device, address);
320 if (!cap || !(cap->flags & JAILHOUSE_PCICAPS_WRITE))
321 return PCI_ACCESS_REJECT;
323 cap_offs = address - cap->start;
324 if (cap->id == PCI_CAP_MSI &&
325 (cap_offs < 10 || (device->info->msi_64bits && cap_offs < 14))) {
326 device->msi_registers.raw[cap_offs / 4] &= ~mask;
327 device->msi_registers.raw[cap_offs / 4] |= value;
329 if (arch_pci_update_msi(device, cap) < 0)
330 return PCI_ACCESS_REJECT;
333 * Address and data words are emulated, the control word is
337 return PCI_ACCESS_DONE;
338 } else if (cap->id == PCI_CAP_MSIX && cap_offs < 4) {
339 device->msix_registers.raw &= ~mask;
340 device->msix_registers.raw |= value;
342 if (pci_update_msix(device, cap) < 0)
343 return PCI_ACCESS_REJECT;
346 return PCI_ACCESS_PERFORM;
350 * Initialization of PCI subsystem.
352 * @return 0 on success, negative error code otherwise.
356 unsigned int mmcfg_size;
359 err = pci_cell_init(&root_cell);
363 mmcfg_start = system_config->platform_info.x86.mmconfig_base;
364 if (mmcfg_start == 0)
367 end_bus = system_config->platform_info.x86.mmconfig_end_bus;
368 mmcfg_size = (end_bus + 1) * 256 * 4096;
369 mmcfg_end = mmcfg_start + mmcfg_size - 4;
371 pci_space = page_alloc(&remap_pool, mmcfg_size / PAGE_SIZE);
373 return trace_error(-ENOMEM);
375 return paging_create(&hv_paging_structs, mmcfg_start, mmcfg_size,
376 (unsigned long)pci_space,
377 PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE,
378 PAGING_NON_COHERENT);
381 static int pci_msix_access_handler(const struct cell *cell, bool is_write,
382 u64 addr, u32 *value)
384 unsigned int dword = (addr % sizeof(union pci_msix_vector)) >> 2;
385 struct pci_device *device = cell->msix_device_list;
390 if (addr >= device->info->msix_address &&
391 addr < device->info->msix_address +
392 device->info->msix_region_size)
394 device = device->next_msix_device;
399 /* access must be DWORD-aligned */
403 offs = addr - device->info->msix_address;
404 index = offs / sizeof(union pci_msix_vector);
408 * The PBA may share a page with the MSI-X table. Writing to
409 * PBA entries is undefined. We declare it as invalid.
411 if (index >= device->info->num_msix_vectors)
414 device->msix_vectors[index].raw[dword] = *value;
415 if (arch_pci_update_msix_vector(device, index) < 0)
418 if (dword == MSIX_VECTOR_CTRL_DWORD)
419 mmio_write32(&device->msix_table[index].raw[dword],
422 if (index >= device->info->num_msix_vectors ||
423 dword == MSIX_VECTOR_CTRL_DWORD)
425 mmio_read32(((void *)device->msix_table) + offs);
427 *value = device->msix_vectors[index].raw[dword];
432 panic_printk("FATAL: Invalid PCI MSI-X table/PBA access, device "
433 "%02x:%02x.%x\n", PCI_BDF_PARAMS(device->info->bdf));
438 * Handler for MMIO-accesses to PCI config space.
439 * @param cell Request issuing cell.
440 * @param is_write True if write access.
441 * @param addr Address accessed.
442 * @param value Pointer to value for reading/writing.
444 * @return 1 if handled successfully, 0 if unhandled, -1 on access error.
446 int pci_mmio_access_handler(const struct cell *cell, bool is_write,
447 u64 addr, u32 *value)
449 u32 mmcfg_offset, reg_addr;
450 struct pci_device *device;
451 enum pci_access access;
453 if (!pci_space || addr < mmcfg_start || addr > mmcfg_end)
454 return pci_msix_access_handler(cell, is_write, addr, value);
456 mmcfg_offset = addr - mmcfg_start;
457 reg_addr = mmcfg_offset & 0xfff;
458 /* access must be DWORD-aligned */
462 device = pci_get_assigned_device(cell, mmcfg_offset >> 12);
465 access = pci_cfg_write_moderate(device, reg_addr, 4, *value);
466 if (access == PCI_ACCESS_REJECT)
468 if (access == PCI_ACCESS_PERFORM)
469 mmio_write32(pci_space + mmcfg_offset, *value);
471 access = pci_cfg_read_moderate(device, reg_addr, 4, value);
472 if (access == PCI_ACCESS_PERFORM)
473 *value = mmio_read32(pci_space + mmcfg_offset);
479 panic_printk("FATAL: Invalid PCI MMCONFIG write, device %02x:%02x.%x, "
480 "reg: %\n", PCI_BDF_PARAMS(mmcfg_offset >> 12), reg_addr);
486 * Retrieve number of enabled MSI vector of a device.
487 * @param device The device to be examined.
489 * @return number of vectors.
491 unsigned int pci_enabled_msi_vectors(struct pci_device *device)
493 return device->msi_registers.msg32.enable ?
494 1 << device->msi_registers.msg32.mme : 0;
497 static void pci_save_msi(struct pci_device *device,
498 const struct jailhouse_pci_capability *cap)
500 u16 bdf = device->info->bdf;
503 for (n = 0; n < (device->info->msi_64bits ? 4 : 3); n++)
504 device->msi_registers.raw[n] =
505 pci_read_config(bdf, cap->start + n * 4, 4);
508 static void pci_restore_msi(struct pci_device *device,
509 const struct jailhouse_pci_capability *cap)
513 for (n = 1; n < (device->info->msi_64bits ? 4 : 3); n++)
514 pci_write_config(device->info->bdf, cap->start + n * 4,
515 device->msi_registers.raw[n], 4);
518 static void pci_suppress_msix(struct pci_device *device,
519 const struct jailhouse_pci_capability *cap,
522 union pci_msix_registers regs = device->msix_registers;
526 pci_write_config(device->info->bdf, cap->start, regs.raw, 4);
529 static void pci_save_msix(struct pci_device *device,
530 const struct jailhouse_pci_capability *cap)
534 device->msix_registers.raw =
535 pci_read_config(device->info->bdf, cap->start, 4);
537 for (n = 0; n < device->info->num_msix_vectors; n++)
538 for (r = 0; r < 4; r++)
539 device->msix_vectors[n].raw[r] =
540 mmio_read32(&device->msix_table[n].raw[r]);
543 static void pci_restore_msix(struct pci_device *device,
544 const struct jailhouse_pci_capability *cap)
548 for (n = 0; n < device->info->num_msix_vectors; n++)
549 /* only restore address/data, control is write-through */
550 for (r = 0; r < 3; r++)
551 mmio_write32(&device->msix_table[n].raw[r],
552 device->msix_vectors[n].raw[r]);
553 pci_suppress_msix(device, cap, false);
557 * Prepare the handover of PCI devices to Jailhouse or back to Linux.
559 void pci_prepare_handover(void)
561 const struct jailhouse_pci_capability *cap;
562 struct pci_device *device;
565 if (!root_cell.pci_devices)
568 for_each_configured_pci_device(device, &root_cell) {
570 for_each_pci_cap(cap, device, n)
571 if (cap->id == PCI_CAP_MSI)
572 arch_pci_suppress_msi(device, cap);
573 else if (cap->id == PCI_CAP_MSIX)
574 pci_suppress_msix(device, cap, true);
578 static int pci_add_virtual_device(struct cell *cell, struct pci_device *device)
581 device->next_virtual_device = cell->virtual_device_list;
582 cell->virtual_device_list = device;
586 static int pci_add_physical_device(struct cell *cell, struct pci_device *device)
588 unsigned int n, pages, size = device->info->msix_region_size;
591 printk("Adding PCI device %02x:%02x.%x to cell \"%s\"\n",
592 PCI_BDF_PARAMS(device->info->bdf), cell->config->name);
594 for (n = 0; n < PCI_NUM_BARS; n ++)
595 device->bar[n] = pci_read_config(device->info->bdf,
596 PCI_CFG_BAR + n * 4, 4);
598 err = arch_pci_add_physical_device(cell, device);
600 if (!err && device->info->msix_address) {
601 device->msix_table = page_alloc(&remap_pool, size / PAGE_SIZE);
602 if (!device->msix_table) {
603 err = trace_error(-ENOMEM);
604 goto error_remove_dev;
607 err = paging_create(&hv_paging_structs,
608 device->info->msix_address, size,
609 (unsigned long)device->msix_table,
610 PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE,
611 PAGING_NON_COHERENT);
613 goto error_page_free;
615 if (device->info->num_msix_vectors > PCI_EMBEDDED_MSIX_VECTS) {
616 pages = PAGES(sizeof(union pci_msix_vector) *
617 device->info->num_msix_vectors);
618 device->msix_vectors = page_alloc(&mem_pool, pages);
619 if (!device->msix_vectors) {
621 goto error_unmap_table;
625 device->next_msix_device = cell->msix_device_list;
626 cell->msix_device_list = device;
631 /* cannot fail, destruction of same size as construction */
632 paging_destroy(&hv_paging_structs, (unsigned long)device->msix_table,
633 size, PAGING_NON_COHERENT);
635 page_free(&remap_pool, device->msix_table, size / PAGE_SIZE);
637 arch_pci_remove_physical_device(device);
641 static void pci_remove_virtual_device(struct pci_device *device)
643 struct pci_device *prev = device->cell->virtual_device_list;
645 if (prev == device) {
646 device->cell->virtual_device_list = device->next_virtual_device;
648 while (prev->next_virtual_device != device)
649 prev = prev->next_virtual_device;
650 prev->next_virtual_device = device->next_virtual_device;
654 static void pci_remove_physical_device(struct pci_device *device)
656 unsigned int size = device->info->msix_region_size;
657 struct pci_device *prev_msix_device;
659 printk("Removing PCI device %02x:%02x.%x from cell \"%s\"\n",
660 PCI_BDF_PARAMS(device->info->bdf), device->cell->config->name);
661 arch_pci_remove_physical_device(device);
662 pci_write_config(device->info->bdf, PCI_CFG_COMMAND,
663 PCI_CMD_INTX_OFF, 2);
665 if (!device->msix_table)
668 /* cannot fail, destruction of same size as construction */
669 paging_destroy(&hv_paging_structs, (unsigned long)device->msix_table,
670 size, PAGING_NON_COHERENT);
671 page_free(&remap_pool, device->msix_table, size / PAGE_SIZE);
673 if (device->msix_vectors != device->msix_vector_array)
674 page_free(&mem_pool, device->msix_vectors,
675 PAGES(sizeof(union pci_msix_vector) *
676 device->info->num_msix_vectors));
678 prev_msix_device = device->cell->msix_device_list;
679 if (prev_msix_device == device) {
680 device->cell->msix_device_list = device->next_msix_device;
682 while (prev_msix_device->next_msix_device != device)
683 prev_msix_device = prev_msix_device->next_msix_device;
684 prev_msix_device->next_msix_device = device->next_msix_device;
689 * Perform PCI-specific initialization for a new cell.
690 * @param cell Cell to be initialized.
692 * @return 0 on success, negative error code otherwise.
696 int pci_cell_init(struct cell *cell)
698 unsigned int devlist_pages = PAGES(cell->config->num_pci_devices *
699 sizeof(struct pci_device));
700 const struct jailhouse_pci_device *dev_infos =
701 jailhouse_cell_pci_devices(cell->config);
702 const struct jailhouse_pci_capability *cap;
703 struct pci_device *device, *root_device;
704 unsigned int ndev, ncap;
707 cell->pci_devices = page_alloc(&mem_pool, devlist_pages);
708 if (!cell->pci_devices)
712 * We order device states in the same way as the static information
713 * so that we can use the index of the latter to find the former. For
714 * the other way around and for obtaining the owner cell, we use more
715 * handy pointers. The cell pointer also encodes active ownership.
717 for (ndev = 0; ndev < cell->config->num_pci_devices; ndev++) {
718 device = &cell->pci_devices[ndev];
719 device->info = &dev_infos[ndev];
720 device->msix_vectors = device->msix_vector_array;
722 if (device->info->type == JAILHOUSE_PCI_TYPE_IVSHMEM) {
723 err = pci_ivshmem_init(cell, device);
726 err = pci_add_virtual_device(cell, device);
732 root_device = pci_get_assigned_device(&root_cell,
733 dev_infos[ndev].bdf);
735 pci_remove_physical_device(root_device);
736 root_device->cell = NULL;
739 err = pci_add_physical_device(cell, device);
745 for_each_pci_cap(cap, device, ncap)
746 if (cap->id == PCI_CAP_MSI)
747 pci_save_msi(device, cap);
748 else if (cap->id == PCI_CAP_MSIX)
749 pci_save_msix(device, cap);
752 if (cell == &root_cell)
753 pci_prepare_handover();
761 static void pci_return_device_to_root_cell(struct pci_device *device)
763 struct pci_device *root_device;
765 for_each_configured_pci_device(root_device, &root_cell)
766 if (root_device->info->domain == device->info->domain &&
767 root_device->info->bdf == device->info->bdf) {
768 if (pci_add_physical_device(&root_cell,
770 printk("WARNING: Failed to re-assign PCI "
771 "device to root cell\n");
773 root_device->cell = &root_cell;
779 * Perform PCI-specific cleanup for a cell under destruction.
780 * @param cell Cell to be destructed.
784 void pci_cell_exit(struct cell *cell)
786 unsigned int devlist_pages = PAGES(cell->config->num_pci_devices *
787 sizeof(struct pci_device));
788 struct pci_device *device;
791 * Do not destroy the root cell. We will shut down the complete
792 * hypervisor instead.
794 if (cell == &root_cell)
797 for_each_configured_pci_device(device, cell)
799 if (device->info->type == JAILHOUSE_PCI_TYPE_IVSHMEM) {
800 pci_ivshmem_exit(device);
801 pci_remove_virtual_device(device);
803 pci_remove_physical_device(device);
804 pci_return_device_to_root_cell(device);
808 page_free(&mem_pool, cell->pci_devices, devlist_pages);
812 * Apply PCI-specific configuration changes.
813 * @param cell_added_removed Cell that was added or removed to/from the
816 * @see arch_config_commit
818 void pci_config_commit(struct cell *cell_added_removed)
820 const struct jailhouse_pci_capability *cap;
821 struct pci_device *device;
825 if (!cell_added_removed)
828 for_each_configured_pci_device(device, &root_cell)
830 for_each_pci_cap(cap, device, n) {
831 if (cap->id == PCI_CAP_MSI) {
832 err = arch_pci_update_msi(device, cap);
833 } else if (cap->id == PCI_CAP_MSIX) {
834 err = pci_update_msix(device, cap);
835 pci_suppress_msix(device, cap, false);
840 if (device->info->type == JAILHOUSE_PCI_TYPE_IVSHMEM) {
841 err = pci_ivshmem_update_msix(device);
851 panic_printk("FATAL: Unsupported MSI/MSI-X state, device %02x:%02x.%x",
852 PCI_BDF_PARAMS(device->info->bdf));
854 panic_printk(", cap %d\n", cap->id);
861 * Shut down the PCI layer during hypervisor deactivation.
863 void pci_shutdown(void)
865 const struct jailhouse_pci_capability *cap;
866 struct pci_device *device;
869 if (!root_cell.pci_devices)
872 for_each_configured_pci_device(device, &root_cell) {
876 for_each_pci_cap(cap, device, n)
877 if (cap->id == PCI_CAP_MSI)
878 pci_restore_msi(device, cap);
879 else if (cap->id == PCI_CAP_MSIX)
880 pci_restore_msix(device, cap);
882 if (device->cell != &root_cell)
883 pci_write_config(device->info->bdf, PCI_CFG_COMMAND,
884 PCI_CMD_INTX_OFF, 2);