2 * Jailhouse, a Linux-based partitioning hypervisor
4 * Copyright (c) Siemens AG, 2014, 2015
7 * Ivan Kolchin <ivan.kolchin@siemens.com>
8 * Jan Kiszka <jan.kiszka@siemens.com>
10 * This work is licensed under the terms of the GNU GPL, version 2. See
11 * the COPYING file in the top-level directory.
14 #include <jailhouse/control.h>
15 #include <jailhouse/mmio.h>
16 #include <jailhouse/pci.h>
17 #include <jailhouse/printk.h>
18 #include <jailhouse/utils.h>
/*
 * MSIX_VECTOR_CTRL_DWORD: index of the per-vector control word inside an
 * MSI-X table entry (entries are 4 dwords; control is the last one).
 */
20 #define MSIX_VECTOR_CTRL_DWORD 3
/*
 * Iterator helpers over a cell's statically configured PCI devices and
 * over one device's capability descriptors.
 * NOTE(review): this listing is gappy — the increment clause of each
 * for_each macro below falls in lines not shown here.
 */
22 #define for_each_configured_pci_device(dev, cell) \
23 for ((dev) = (cell)->pci_devices; \
24 (dev) - (cell)->pci_devices < (cell)->config->num_pci_devices; \
27 #define for_each_pci_cap(cap, dev, counter) \
28 for ((cap) = jailhouse_cell_pci_caps((dev)->cell->config) + \
29 (dev)->info->caps_start, (counter) = 0; \
30 (counter) < (dev)->info->num_caps; \
/*
 * Per-dword access policy for config-space writes: an access type plus a
 * bitmask selecting which bits that type applies to (cleared bits deny).
 * NOTE(review): the members of the anonymous enum behind "type"
 * (PCI_CONFIG_ALLOW / PCI_CONFIG_RDONLY, used in the tables below, plus
 * presumably a zero-valued deny default) are in lines not visible here.
 */
33 /* entry for PCI config space access control */
34 struct pci_cfg_control {
39 } type; /* Access type */
40 u32 mask; /* Bit set: access type applies; bit cleared: deny access */
43 /* --- Access control for writing to PCI config space registers --- */
44 /* Type 1: Endpoints */
/*
 * Write policy per config-header dword for endpoint devices; dwords not
 * listed are zero-initialized (type 0 — presumably deny; the enum
 * declaration is not visible in this listing).
 */
45 static const struct pci_cfg_control endpoint_write[PCI_CONFIG_HEADER_SIZE] = {
46 [0x04/4] = {PCI_CONFIG_ALLOW, 0xffffffff}, /* Command, Status */
47 [0x0c/4] = {PCI_CONFIG_ALLOW, 0xff00ffff}, /* BIST, Lat., Cacheline */
48 [0x30/4] = {PCI_CONFIG_RDONLY, 0xffffffff}, /* ROM BAR */
49 [0x3c/4] = {PCI_CONFIG_ALLOW, 0x000000ff}, /* Int Line */
/*
 * Write policy for type-1 (bridge) headers. Limit/base windows are made
 * read-only rather than rejected, for the reason stated below.
 * NOTE(review): the comment opener and the table's closing brace fall in
 * lines not shown in this listing.
 */
53 * Note: Ignore limit/base reprogramming attempts because the root cell will
54 * perform them on bus rescans. */
55 static const struct pci_cfg_control bridge_write[PCI_CONFIG_HEADER_SIZE] = {
56 [0x04/4] = {PCI_CONFIG_ALLOW, 0xffffffff}, /* Command, Status */
57 [0x0c/4] = {PCI_CONFIG_ALLOW, 0xff00ffff}, /* BIST, Lat., Cacheline */
58 [0x1c/4] = {PCI_CONFIG_RDONLY, 0x0000ffff}, /* I/O Limit & Base */
59 [0x20/4 ... /* Memory Limit/Base, Prefetch Memory Limit/Base, */
60 0x30/4] = {PCI_CONFIG_RDONLY, 0xffffffff}, /* I/O Limit & Base */
61 [0x3c/4] = {PCI_CONFIG_ALLOW, 0xffff00ff}, /* Int Line, Bridge Ctrl */
/*
 * Hypervisor-virtual mapping of the platform MMCONFIG area and the
 * physical base/size it covers (established in pci_init below).
 * NOTE(review): "end_bus", referenced throughout this file, is declared
 * in a line not visible in this listing.
 */
64 static void *pci_space;
65 static u64 mmcfg_start, mmcfg_size;
/*
 * Count the MMIO regions a cell will need for PCI: one for the MMCONFIG
 * space when the platform provides one, plus PCI_IVSHMEM_NUM_MMIO_REGIONS
 * for every configured ivshmem device.
 * NOTE(review): the mmconfig increment, braces and return statement fall
 * in lines not shown here.
 */
68 unsigned int pci_mmio_count_regions(struct cell *cell)
70 const struct jailhouse_pci_device *dev_infos =
71 jailhouse_cell_pci_devices(cell->config);
72 unsigned int n, regions = 0;
74 if (system_config->platform_info.x86.mmconfig_base)
77 for (n = 0; n < cell->config->num_pci_devices; n++)
78 if (dev_infos[n].type == JAILHOUSE_PCI_TYPE_IVSHMEM)
79 regions += PCI_IVSHMEM_NUM_MMIO_REGIONS;
/*
 * Return the virtual base of one function's config page: ECAM assigns
 * each bus/device/function a 4 KiB page, hence the shift by 12.
 */
84 static void *pci_get_device_mmcfg_base(u16 bdf)
86 return pci_space + ((unsigned long)bdf << 12);
/*
 * NOTE(review): gappy listing — the doc-comment opener, function braces
 * and the switch(size) dispatch lines around the mmio_read* calls are
 * not visible here.
 */
90 * Read from PCI config space.
91 * @param bdf 16-bit bus/device/function ID of target.
92 * @param address Config space access address.
93 * @param size Access size (1, 2 or 4 bytes).
97 * @see pci_write_config
99 u32 pci_read_config(u16 bdf, u16 address, unsigned int size)
/* Fall back to the arch accessor when MMCONFIG is unmapped or the bus
 * lies beyond the mapped range; otherwise read via the ECAM mapping. */
101 void *mmcfg_addr = pci_get_device_mmcfg_base(bdf) + address;
103 if (!pci_space || PCI_BUS(bdf) > end_bus)
104 return arch_pci_read_config(bdf, address, size);
107 return mmio_read8(mmcfg_addr);
109 return mmio_read16(mmcfg_addr);
111 return mmio_read32(mmcfg_addr);
/*
 * NOTE(review): gappy listing — the doc-comment opener, function braces
 * and the switch(size) dispatch lines around the mmio_write* calls are
 * not visible here.
 */
115 * Write to PCI config space.
116 * @param bdf 16-bit bus/device/function ID of target.
117 * @param address Config space access address.
118 * @param value Value to be written.
119 * @param size Access size (1, 2 or 4 bytes).
121 * @see pci_read_config
123 void pci_write_config(u16 bdf, u16 address, u32 value, unsigned int size)
/* Mirror of pci_read_config: arch fallback or direct ECAM store. */
125 void *mmcfg_addr = pci_get_device_mmcfg_base(bdf) + address;
127 if (!pci_space || PCI_BUS(bdf) > end_bus)
128 return arch_pci_write_config(bdf, address, value, size);
131 mmio_write8(mmcfg_addr, value);
133 mmio_write16(mmcfg_addr, value);
135 mmio_write32(mmcfg_addr, value);
/*
 * NOTE(review): the doc-comment opener, braces and the trailing
 * "return NULL" are in lines not shown here.
 */
139 * Look up device owned by a cell.
140 * @param[in] cell Owning cell.
141 * @param bdf 16-bit bus/device/function ID.
143 * @return Pointer to owned PCI device or NULL.
145 struct pci_device *pci_get_assigned_device(const struct cell *cell, u16 bdf)
147 const struct jailhouse_pci_device *dev_info =
148 jailhouse_cell_pci_devices(cell->config);
/* Linear scan of the static config; the runtime state array is indexed
 * identically, and a non-NULL .cell pointer encodes active ownership. */
151 /* We iterate over the static device information to increase cache
153 for (n = 0; n < cell->config->num_pci_devices; n++)
154 if (dev_info[n].bdf == bdf)
155 return cell->pci_devices[n].cell ?
156 &cell->pci_devices[n] : NULL;
/*
 * NOTE(review): the doc-comment opener, braces, the "return cap" on a
 * range hit and the trailing "return NULL" are in lines not shown.
 */
162 * Look up capability at given config space address.
163 * @param device The device to be accessed.
164 * @param address Config space access address.
166 * @return Corresponding capability structure or NULL if none found.
170 static const struct jailhouse_pci_capability *
171 pci_find_capability(struct pci_device *device, u16 address)
173 const struct jailhouse_pci_capability *cap =
174 jailhouse_cell_pci_caps(device->cell->config) +
175 device->info->caps_start;
/* Hit when cap->start <= address < cap->start + cap->len. */
178 for (n = 0; n < device->info->num_caps; n++, cap++)
179 if (cap->start <= address && cap->start + cap->len > address)
/*
 * NOTE(review): gappy listing — the doc-comment opener, the NULL-device
 * check preceding the first "return PCI_ACCESS_DONE", and several braces
 * are in lines not shown here.
 */
186 * Moderate config space read access.
187 * @param device The device to be accessed. If NULL, access will be
188 * emulated, returning a value of -1.
189 * @param address Config space address.
190 * @param size Access size (1, 2 or 4 bytes).
191 * @param value Pointer to buffer to receive the emulated value if
192 * PCI_ACCESS_DONE is returned.
194 * @return PCI_ACCESS_PERFORM or PCI_ACCESS_DONE.
196 * @see pci_cfg_write_moderate
198 enum pci_access pci_cfg_read_moderate(struct pci_device *device, u16 address,
199 unsigned int size, u32 *value)
201 const struct jailhouse_pci_capability *cap;
202 unsigned int bar_no, cap_offs;
206 return PCI_ACCESS_DONE;
209 /* Emulate BARs for physical and virtual devices */
210 if (device->info->type != JAILHOUSE_PCI_TYPE_BRIDGE) {
/* BAR reads never reach hardware: hand out the shadow copy kept in
 * device->bar, shifted for sub-dword accesses. */
211 /* Emulate BAR access, always returning the shadow value. */
212 if (address >= PCI_CFG_BAR && address <= PCI_CFG_BAR_END) {
213 bar_no = (address - PCI_CFG_BAR) / 4;
214 *value = device->bar[bar_no] >> ((address % 4) * 8);
215 return PCI_ACCESS_DONE;
218 /* We do not expose ROMs. */
219 if (address >= PCI_CFG_ROMBAR && address < PCI_CFG_CAPS) {
221 return PCI_ACCESS_DONE;
/* ivshmem devices are fully virtual; delegate all their reads. */
225 if (device->info->type == JAILHOUSE_PCI_TYPE_IVSHMEM)
226 return pci_ivshmem_cfg_read(device, address, value);
228 if (address < PCI_CONFIG_HEADER_SIZE)
229 return PCI_ACCESS_PERFORM;
231 cap = pci_find_capability(device, address);
233 return PCI_ACCESS_PERFORM;
/* MSI message registers (3 dwords, 4 with 64-bit addressing) are
 * emulated from the shadow copy; everything else is passed through. */
235 cap_offs = address - cap->start;
236 if (cap->id == PCI_CAP_MSI && cap_offs >= 4 &&
237 (cap_offs < 10 || (device->info->msi_64bits && cap_offs < 14))) {
238 *value = device->msi_registers.raw[cap_offs / 4] >>
239 ((cap_offs % 4) * 8);
240 return PCI_ACCESS_DONE;
243 return PCI_ACCESS_PERFORM;
/*
 * Push every shadowed MSI-X vector of the device to the hardware via the
 * arch layer.
 * NOTE(review): the error-propagation and return lines are not visible
 * in this listing — presumably a failing result is returned early.
 */
246 static int pci_update_msix(struct pci_device *device,
247 const struct jailhouse_pci_capability *cap)
252 for (n = 0; n < device->info->num_msix_vectors; n++) {
253 result = arch_pci_update_msix_vector(device, n);
/*
 * NOTE(review): gappy listing — the doc-comment opener, the NULL-device
 * check before the first REJECT, the default case of the cfg_control
 * switch, and several braces are in lines not shown here.
 */
261 * Moderate config space write access.
262 * @param device The device to be accessed. If NULL, access will be
264 * @param address Config space address.
265 * @param size Access size (1, 2 or 4 bytes).
266 * @param value Value to be written.
268 * @return PCI_ACCESS_REJECT, PCI_ACCESS_PERFORM or PCI_ACCESS_DONE.
270 * @see pci_cfg_read_moderate
272 enum pci_access pci_cfg_write_moderate(struct pci_device *device, u16 address,
273 unsigned int size, u32 value)
275 const struct jailhouse_pci_capability *cap;
276 /* initialize list to work around wrong compiler warning */
/* Align value and byte mask of a sub-dword access to its dword. */
277 unsigned int bias_shift = (address % 4) * 8;
278 u32 mask = BYTE_MASK(size) << bias_shift;
279 struct pci_cfg_control cfg_control;
280 unsigned int bar_no, cap_offs;
283 return PCI_ACCESS_REJECT;
285 value <<= bias_shift;
/* BAR writes only update the shadow copy, restricted to the bits the
 * static config declares writable - hardware BARs stay untouched. */
287 /* Emulate BARs for physical and virtual devices */
288 if (device->info->type != JAILHOUSE_PCI_TYPE_BRIDGE &&
289 address >= PCI_CFG_BAR && address <= PCI_CFG_BAR_END) {
290 bar_no = (address - PCI_CFG_BAR) / 4;
291 mask &= device->info->bar_mask[bar_no];
292 device->bar[bar_no] &= ~mask;
293 device->bar[bar_no] |= value & mask;
294 return PCI_ACCESS_DONE;
/* Header writes go through the per-dword policy tables above; any bit
 * outside the allowed mask rejects the whole access. */
297 if (address < PCI_CONFIG_HEADER_SIZE) {
298 if (device->info->type == JAILHOUSE_PCI_TYPE_BRIDGE)
299 cfg_control = bridge_write[address / 4];
300 else /* physical or virtual device */
301 cfg_control = endpoint_write[address / 4];
303 if ((cfg_control.mask & mask) != mask)
304 return PCI_ACCESS_REJECT;
306 switch (cfg_control.type) {
307 case PCI_CONFIG_ALLOW:
308 if (device->info->type == JAILHOUSE_PCI_TYPE_IVSHMEM)
309 return pci_ivshmem_cfg_write(device,
310 address / 4, mask, value);
311 return PCI_ACCESS_PERFORM;
312 case PCI_CONFIG_RDONLY:
313 return PCI_ACCESS_DONE;
315 return PCI_ACCESS_REJECT;
319 if (device->info->type == JAILHOUSE_PCI_TYPE_IVSHMEM)
320 return pci_ivshmem_cfg_write(device, address / 4, mask, value);
/* Capability writes require the WRITE flag in the static config. */
322 cap = pci_find_capability(device, address);
323 if (!cap || !(cap->flags & JAILHOUSE_PCICAPS_WRITE))
324 return PCI_ACCESS_REJECT;
/* MSI and the MSI-X control word are emulated in shadow registers and
 * propagated to hardware through the arch layer. */
326 cap_offs = address - cap->start;
327 if (cap->id == PCI_CAP_MSI &&
328 (cap_offs < 10 || (device->info->msi_64bits && cap_offs < 14))) {
329 device->msi_registers.raw[cap_offs / 4] &= ~mask;
330 device->msi_registers.raw[cap_offs / 4] |= value;
332 if (arch_pci_update_msi(device, cap) < 0)
333 return PCI_ACCESS_REJECT;
336 * Address and data words are emulated, the control word is
340 return PCI_ACCESS_DONE;
341 } else if (cap->id == PCI_CAP_MSIX && cap_offs < 4) {
342 device->msix_registers.raw &= ~mask;
343 device->msix_registers.raw |= value;
345 if (pci_update_msix(device, cap) < 0)
346 return PCI_ACCESS_REJECT;
349 return PCI_ACCESS_PERFORM;
/*
 * PCI subsystem init: set up the root cell's device list, then map the
 * platform MMCONFIG area (one 4 KiB page per bus/dev/fn up to end_bus)
 * into hypervisor space as uncached device memory.
 * NOTE(review): gappy listing — the function signature (presumably
 * "int pci_init(void)"), error checks after pci_cell_init/page_alloc and
 * the early-out when no mmconfig base is configured are not visible.
 */
353 * Initialization of PCI subsystem.
355 * @return 0 on success, negative error code otherwise.
361 err = pci_cell_init(&root_cell);
365 mmcfg_start = system_config->platform_info.x86.mmconfig_base;
366 if (mmcfg_start == 0)
369 end_bus = system_config->platform_info.x86.mmconfig_end_bus;
370 mmcfg_size = (end_bus + 1) * 256 * 4096;
372 pci_space = page_alloc(&remap_pool, mmcfg_size / PAGE_SIZE);
374 return trace_error(-ENOMEM);
376 return paging_create(&hv_paging_structs, mmcfg_start, mmcfg_size,
377 (unsigned long)pci_space,
378 PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE,
379 PAGING_NON_COHERENT);
/*
 * Moderate MMIO accesses to the MSI-X table/PBA region of any device on
 * the cell's msix_device_list. Vector entries are shadowed: writes land
 * in msix_vectors[] and are applied via the arch layer; only the control
 * dword is written through to the real table. Reads of shadowed dwords
 * come from the shadow, PBA/control reads from hardware.
 * NOTE(review): gappy listing — the list-walk loop header, alignment
 * check, is_write branch, return statements and the invalid-access exit
 * path leading to panic_printk are in lines not shown.
 */
382 static int pci_msix_access_handler(const struct cell *cell, bool is_write,
383 u64 addr, u32 *value)
385 unsigned int dword = (addr % sizeof(union pci_msix_vector)) >> 2;
386 struct pci_device *device = cell->msix_device_list;
/* Find the device whose MSI-X region contains addr. */
391 if (addr >= device->info->msix_address &&
392 addr < device->info->msix_address +
393 device->info->msix_region_size)
395 device = device->next_msix_device;
400 /* access must be DWORD-aligned */
404 offs = addr - device->info->msix_address;
405 index = offs / sizeof(union pci_msix_vector);
409 * The PBA may share a page with the MSI-X table. Writing to
410 * PBA entries is undefined. We declare it as invalid.
412 if (index >= device->info->num_msix_vectors)
415 device->msix_vectors[index].raw[dword] = *value;
416 if (arch_pci_update_msix_vector(device, index) < 0)
419 if (dword == MSIX_VECTOR_CTRL_DWORD)
420 mmio_write32(&device->msix_table[index].raw[dword],
423 if (index >= device->info->num_msix_vectors ||
424 dword == MSIX_VECTOR_CTRL_DWORD)
426 mmio_read32(((void *)device->msix_table) + offs);
428 *value = device->msix_vectors[index].raw[dword];
433 panic_printk("FATAL: Invalid PCI MSI-X table/PBA access, device "
434 "%02x:%02x.%x\n", PCI_BDF_PARAMS(device->info->bdf));
/*
 * MMIO handler for the trapped MMCONFIG area: decode device (address
 * bits 12+ form the BDF) and register (low 12 bits), run the access
 * through the moderation functions, and only touch the real config
 * space when they answer PCI_ACCESS_PERFORM.
 * NOTE(review): gappy listing — the alignment check, MMIO_HANDLED /
 * MMIO_ERROR returns and the panic exit path are in lines not shown;
 * the format string at the end also appears cut ("reg: %\n").
 */
438 static enum mmio_result pci_mmconfig_access_handler(void *arg,
439 struct mmio_access *mmio)
441 u32 reg_addr = mmio->address & 0xfff;
442 struct pci_device *device;
443 enum pci_access result;
446 /* access must be DWORD-aligned */
450 device = pci_get_assigned_device(this_cell(), mmio->address >> 12);
452 if (mmio->is_write) {
453 result = pci_cfg_write_moderate(device, reg_addr, 4,
455 if (result == PCI_ACCESS_REJECT)
457 if (result == PCI_ACCESS_PERFORM)
458 mmio_write32(pci_space + mmio->address, mmio->value);
460 result = pci_cfg_read_moderate(device, reg_addr, 4, &val);
461 if (result == PCI_ACCESS_PERFORM)
462 mmio->value = mmio_read32(pci_space + mmio->address);
470 panic_printk("FATAL: Invalid PCI MMCONFIG write, device %02x:%02x.%x, "
471 "reg: %\n", PCI_BDF_PARAMS(mmio->address >> 12),
/*
 * NOTE(review): the doc-comment opener and function braces are in lines
 * not shown. The visible body simply forwards to the MSI-X region
 * handler above.
 */
478 * Handler for MMIO-accesses to PCI config space.
479 * @param cell Request issuing cell.
480 * @param is_write True if write access.
481 * @param addr Address accessed.
482 * @param value Pointer to value for reading/writing.
484 * @return 1 if handled successfully, 0 if unhandled, -1 on access error.
486 int pci_mmio_access_handler(const struct cell *cell, bool is_write,
487 u64 addr, u32 *value)
489 return pci_msix_access_handler(cell, is_write, addr, value);
/*
 * NOTE(review): doc-comment opener and braces not shown. Returns
 * 2^MME (multiple message enable field) when MSI is enabled, else 0.
 */
493 * Retrieve number of enabled MSI vector of a device.
494 * @param device The device to be examined.
496 * @return number of vectors.
498 unsigned int pci_enabled_msi_vectors(struct pci_device *device)
500 return device->msi_registers.msg32.enable ?
501 1 << device->msi_registers.msg32.mme : 0;
/*
 * Snapshot the device's MSI capability registers into the shadow copy:
 * 3 dwords, or 4 when the capability uses 64-bit message addresses.
 */
504 static void pci_save_msi(struct pci_device *device,
505 const struct jailhouse_pci_capability *cap)
507 u16 bdf = device->info->bdf;
510 for (n = 0; n < (device->info->msi_64bits ? 4 : 3); n++)
511 device->msi_registers.raw[n] =
512 pci_read_config(bdf, cap->start + n * 4, 4);
/*
 * Write the shadowed MSI registers back to the device. The loop starts
 * at n = 1, skipping dword 0 (capability header / message control).
 */
515 static void pci_restore_msi(struct pci_device *device,
516 const struct jailhouse_pci_capability *cap)
520 for (n = 1; n < (device->info->msi_64bits ? 4 : 3); n++)
521 pci_write_config(device->info->bdf, cap->start + n * 4,
522 device->msi_registers.raw[n], 4);
/*
 * Write the (shadowed) MSI-X message control word to the device,
 * optionally with delivery suppressed.
 * NOTE(review): the third parameter's name and the line that modifies
 * "regs" when suppression is requested (presumably setting the function
 * mask bit) are not visible in this listing - confirm against the full
 * source.
 */
525 static void pci_suppress_msix(struct pci_device *device,
526 const struct jailhouse_pci_capability *cap,
529 union pci_msix_registers regs = device->msix_registers;
533 pci_write_config(device->info->bdf, cap->start, regs.raw, 4);
/*
 * Snapshot MSI-X state: the capability's control dword from config
 * space, plus all 4 dwords of every vector from the mapped table.
 */
536 static void pci_save_msix(struct pci_device *device,
537 const struct jailhouse_pci_capability *cap)
541 device->msix_registers.raw =
542 pci_read_config(device->info->bdf, cap->start, 4);
544 for (n = 0; n < device->info->num_msix_vectors; n++)
545 for (r = 0; r < 4; r++)
546 device->msix_vectors[n].raw[r] =
547 mmio_read32(&device->msix_table[n].raw[r]);
/*
 * Restore shadowed MSI-X vectors to the hardware table (dwords 0-2
 * only; the control dword is written through at access time), then
 * re-arm the capability control word without suppression.
 */
550 static void pci_restore_msix(struct pci_device *device,
551 const struct jailhouse_pci_capability *cap)
555 for (n = 0; n < device->info->num_msix_vectors; n++)
556 /* only restore address/data, control is write-through */
557 for (r = 0; r < 3; r++)
558 mmio_write32(&device->msix_table[n].raw[r],
559 device->msix_vectors[n].raw[r]);
560 pci_suppress_msix(device, cap, false);
/*
 * NOTE(review): doc-comment opener, braces, the early return when the
 * root cell has no device list, and a device filter inside the loop are
 * in lines not shown here.
 */
564 * Prepare the handover of PCI devices to Jailhouse or back to Linux.
566 void pci_prepare_handover(void)
568 const struct jailhouse_pci_capability *cap;
569 struct pci_device *device;
572 if (!root_cell.pci_devices)
/* Silence MSI/MSI-X delivery on root-cell devices before ownership
 * changes hands. */
575 for_each_configured_pci_device(device, &root_cell) {
577 for_each_pci_cap(cap, device, n)
578 if (cap->id == PCI_CAP_MSI)
579 arch_pci_suppress_msi(device, cap);
580 else if (cap->id == PCI_CAP_MSIX)
581 pci_suppress_msix(device, cap, true);
/*
 * Attach a physical device to a cell: snapshot its BARs into the shadow
 * copy, register it with the arch layer, and - when it has MSI-X - map
 * its table region into hypervisor space, allocate an external vector
 * shadow when the embedded array is too small, and link the device into
 * the cell's msix_device_list.
 * NOTE(review): gappy listing - error-label lines (error_remove_dev,
 * error_page_free, error_unmap_table), several err checks and the final
 * "return err" are not visible; the goto targets below imply them.
 */
585 static int pci_add_physical_device(struct cell *cell, struct pci_device *device)
587 unsigned int n, pages, size = device->info->msix_region_size;
590 printk("Adding PCI device %02x:%02x.%x to cell \"%s\"\n",
591 PCI_BDF_PARAMS(device->info->bdf), cell->config->name);
593 for (n = 0; n < PCI_NUM_BARS; n ++)
594 device->bar[n] = pci_read_config(device->info->bdf,
595 PCI_CFG_BAR + n * 4, 4);
597 err = arch_pci_add_physical_device(cell, device);
599 if (!err && device->info->msix_address) {
600 device->msix_table = page_alloc(&remap_pool, size / PAGE_SIZE);
601 if (!device->msix_table) {
602 err = trace_error(-ENOMEM);
603 goto error_remove_dev;
606 err = paging_create(&hv_paging_structs,
607 device->info->msix_address, size,
608 (unsigned long)device->msix_table,
609 PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE,
610 PAGING_NON_COHERENT);
612 goto error_page_free;
614 if (device->info->num_msix_vectors > PCI_EMBEDDED_MSIX_VECTS) {
615 pages = PAGES(sizeof(union pci_msix_vector) *
616 device->info->num_msix_vectors);
617 device->msix_vectors = page_alloc(&mem_pool, pages);
618 if (!device->msix_vectors) {
620 goto error_unmap_table;
624 device->next_msix_device = cell->msix_device_list;
625 cell->msix_device_list = device;
/* Unwind path: undo the table mapping/allocation and arch state. */
630 /* cannot fail, destruction of same size as construction */
631 paging_destroy(&hv_paging_structs, (unsigned long)device->msix_table,
632 size, PAGING_NON_COHERENT);
634 page_free(&remap_pool, device->msix_table, size / PAGE_SIZE);
636 arch_pci_remove_physical_device(device);
/*
 * Detach a physical device from its cell: arch removal, mask legacy
 * INTx via the command register, tear down the MSI-X table mapping and
 * any externally allocated vector shadow, and unlink the device from
 * the cell's msix_device_list (head or interior position).
 * NOTE(review): gappy listing - the early return after the
 * !device->msix_table check and some braces are not visible here.
 */
640 static void pci_remove_physical_device(struct pci_device *device)
642 unsigned int size = device->info->msix_region_size;
643 struct pci_device *prev_msix_device;
645 printk("Removing PCI device %02x:%02x.%x from cell \"%s\"\n",
646 PCI_BDF_PARAMS(device->info->bdf), device->cell->config->name);
647 arch_pci_remove_physical_device(device);
648 pci_write_config(device->info->bdf, PCI_CFG_COMMAND,
649 PCI_CMD_INTX_OFF, 2);
651 if (!device->msix_table)
654 /* cannot fail, destruction of same size as construction */
655 paging_destroy(&hv_paging_structs, (unsigned long)device->msix_table,
656 size, PAGING_NON_COHERENT);
657 page_free(&remap_pool, device->msix_table, size / PAGE_SIZE);
/* Only free the vector array if it was externally allocated (i.e. it
 * is not the embedded msix_vector_array). */
659 if (device->msix_vectors != device->msix_vector_array)
660 page_free(&mem_pool, device->msix_vectors,
661 PAGES(sizeof(union pci_msix_vector) *
662 device->info->num_msix_vectors))_SENTINEL
664 prev_msix_device = device->cell->msix_device_list;
665 if (prev_msix_device == device) {
666 device->cell->msix_device_list = device->next_msix_device;
668 while (prev_msix_device->next_msix_device != device)
669 prev_msix_device = prev_msix_device->next_msix_device;
670 prev_msix_device->next_msix_device = device->next_msix_device;
/*
 * NOTE(review): gappy listing - the doc-comment opener, the mmcfg_start
 * guard before mmio_region_register, error handling after each init
 * call, the device->cell assignment and the final return/unwind path
 * are in lines not shown here.
 */
675 * Perform PCI-specific initialization for a new cell.
676 * @param cell Cell to be initialized.
678 * @return 0 on success, negative error code otherwise.
682 int pci_cell_init(struct cell *cell)
684 unsigned int devlist_pages = PAGES(cell->config->num_pci_devices *
685 sizeof(struct pci_device));
686 const struct jailhouse_pci_device *dev_infos =
687 jailhouse_cell_pci_devices(cell->config);
688 const struct jailhouse_pci_capability *cap;
689 struct pci_device *device, *root_device;
690 unsigned int ndev, ncap;
694 mmio_region_register(cell, mmcfg_start, mmcfg_size,
695 pci_mmconfig_access_handler, NULL);
697 cell->pci_devices = page_alloc(&mem_pool, devlist_pages);
698 if (!cell->pci_devices)
702 * We order device states in the same way as the static information
703 * so that we can use the index of the latter to find the former. For
704 * the other way around and for obtaining the owner cell, we use more
705 * handy pointers. The cell pointer also encodes active ownership.
707 for (ndev = 0; ndev < cell->config->num_pci_devices; ndev++) {
708 device = &cell->pci_devices[ndev];
709 device->info = &dev_infos[ndev];
710 device->msix_vectors = device->msix_vector_array;
712 if (device->info->type == JAILHOUSE_PCI_TYPE_IVSHMEM) {
713 err = pci_ivshmem_init(cell, device);
/* Physical device: steal it from the root cell (if currently owned
 * there) before attaching it to the new cell. */
722 root_device = pci_get_assigned_device(&root_cell,
723 dev_infos[ndev].bdf);
725 pci_remove_physical_device(root_device);
726 root_device->cell = NULL;
729 err = pci_add_physical_device(cell, device);
/* Save interrupt state so it can be restored on shutdown/handback. */
735 for_each_pci_cap(cap, device, ncap)
736 if (cap->id == PCI_CAP_MSI)
737 pci_save_msi(device, cap);
738 else if (cap->id == PCI_CAP_MSIX)
739 pci_save_msix(device, cap);
742 if (cell == &root_cell)
743 pci_prepare_handover();
/*
 * Hand a physical device back to the root cell on cell destruction:
 * find the matching root-cell entry by domain and BDF, re-add it, and
 * mark the root cell as owner. Failure only logs a warning.
 * NOTE(review): the pci_add_physical_device argument list is split
 * across lines not all visible here.
 */
751 static void pci_return_device_to_root_cell(struct pci_device *device)
753 struct pci_device *root_device;
755 for_each_configured_pci_device(root_device, &root_cell)
756 if (root_device->info->domain == device->info->domain &&
757 root_device->info->bdf == device->info->bdf) {
758 if (pci_add_physical_device(&root_cell,
760 printk("WARNING: Failed to re-assign PCI "
761 "device to root cell\n");
763 root_device->cell = &root_cell;
/*
 * NOTE(review): doc-comment opener, braces, the ownership filter inside
 * the loop and the early return for the root cell are in lines not
 * shown here.
 */
769 * Perform PCI-specific cleanup for a cell under destruction.
770 * @param cell Cell to be destructed.
774 void pci_cell_exit(struct cell *cell)
776 unsigned int devlist_pages = PAGES(cell->config->num_pci_devices *
777 sizeof(struct pci_device));
778 struct pci_device *device;
781 * Do not destroy the root cell. We will shut down the complete
782 * hypervisor instead.
784 if (cell == &root_cell)
/* Tear down each device: virtual ones exit directly, physical ones
 * are removed and returned to the root cell. */
787 for_each_configured_pci_device(device, cell)
789 if (device->info->type == JAILHOUSE_PCI_TYPE_IVSHMEM) {
790 pci_ivshmem_exit(device);
792 pci_remove_physical_device(device);
793 pci_return_device_to_root_cell(device);
797 page_free(&mem_pool, cell->pci_devices, devlist_pages);
/*
 * NOTE(review): gappy listing - the doc-comment opener, braces, error
 * checks after each update call and the normal return before the panic
 * exit path are in lines not shown here.
 */
801 * Apply PCI-specific configuration changes.
802 * @param cell_added_removed Cell that was added or removed to/from the
805 * @see arch_config_commit
807 void pci_config_commit(struct cell *cell_added_removed)
809 const struct jailhouse_pci_capability *cap;
810 struct pci_device *device;
814 if (!cell_added_removed)
/* Reprogram interrupt remapping of all root-cell devices after a cell
 * was created or destroyed; an unrecoverable state panics below. */
817 for_each_configured_pci_device(device, &root_cell)
819 for_each_pci_cap(cap, device, n) {
820 if (cap->id == PCI_CAP_MSI) {
821 err = arch_pci_update_msi(device, cap);
822 } else if (cap->id == PCI_CAP_MSIX) {
823 err = pci_update_msix(device, cap);
824 pci_suppress_msix(device, cap, false);
829 if (device->info->type == JAILHOUSE_PCI_TYPE_IVSHMEM) {
830 err = pci_ivshmem_update_msix(device);
840 panic_printk("FATAL: Unsupported MSI/MSI-X state, device %02x:%02x.%x",
841 PCI_BDF_PARAMS(device->info->bdf));
843 panic_printk(", cap %d\n", cap->id);
/*
 * NOTE(review): doc-comment opener, braces, an ownership filter inside
 * the loop and the function's closing lines are not visible here; the
 * definition may extend past the end of this listing.
 */
850 * Shut down the PCI layer during hypervisor deactivation.
852 void pci_shutdown(void)
854 const struct jailhouse_pci_capability *cap;
855 struct pci_device *device;
858 if (!root_cell.pci_devices)
/* Restore the MSI/MSI-X state saved at cell init, and leave INTx off
 * on devices no longer owned by the root cell. */
861 for_each_configured_pci_device(device, &root_cell) {
865 for_each_pci_cap(cap, device, n)
866 if (cap->id == PCI_CAP_MSI)
867 pci_restore_msi(device, cap);
868 else if (cap->id == PCI_CAP_MSIX)
869 pci_restore_msix(device, cap);
871 if (device->cell != &root_cell)
872 pci_write_config(device->info->bdf, PCI_CFG_COMMAND,
873 PCI_CMD_INTX_OFF, 2);