/*
 * vfio based device assignment support
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Alex Williamson <alex.williamson@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on qemu-kvm device-assignment:
 *  Adapted for KVM by Qumranet.
 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
 */
#include <linux/vfio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu/event_notifier.h"
#include "qemu/queue.h"
#include "qemu/range.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
/* #define DEBUG_VFIO */

#ifdef DEBUG_VFIO
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "vfio: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

/* Extra debugging, trap acceleration paths for more logging */
#define VFIO_ALLOW_MMAP 1
#define VFIO_ALLOW_KVM_INTX 1
typedef struct VFIOQuirk {
    MemoryRegion mem;
    struct VFIODevice *vdev;
    QLIST_ENTRY(VFIOQuirk) next;
    struct {
        uint32_t base_offset:TARGET_PAGE_BITS;
        uint32_t address_offset:TARGET_PAGE_BITS;
        uint32_t address_size:3;
        uint32_t bar:3;

        uint32_t address_match;
        uint32_t address_mask;

        uint32_t address_val:TARGET_PAGE_BITS;
        uint32_t data_offset:TARGET_PAGE_BITS;
        uint32_t data_size:3;

        uint8_t flags;
        uint8_t read_flags;
        uint8_t write_flags;
    } data;
} VFIOQuirk;
typedef struct VFIOBAR {
    off_t fd_offset; /* offset of BAR within device fd */
    int fd; /* device fd, allows us to pass VFIOBAR as opaque data */
    MemoryRegion mem; /* slow, read/write access */
    MemoryRegion mmap_mem; /* direct mapped access */
    void *mmap;
    size_t size;
    uint32_t flags; /* VFIO region flags (rd/wr/mmap) */
    uint8_t nr; /* cache the BAR number for debug */
    bool ioport;
    bool mem64;
    QLIST_HEAD(, VFIOQuirk) quirks;
} VFIOBAR;
typedef struct VFIOVGARegion {
    MemoryRegion mem;
    off_t offset;
    int nr;
    QLIST_HEAD(, VFIOQuirk) quirks;
} VFIOVGARegion;

typedef struct VFIOVGA {
    off_t fd_offset;
    int fd;
    VFIOVGARegion region[QEMU_PCI_VGA_NUM_REGIONS];
} VFIOVGA;
typedef struct VFIOINTx {
    bool pending; /* interrupt pending */
    bool kvm_accel; /* set when QEMU bypass through KVM enabled */
    uint8_t pin; /* which pin to pull for qemu_set_irq */
    EventNotifier interrupt; /* eventfd triggered on interrupt */
    EventNotifier unmask; /* eventfd for unmask on QEMU bypass */
    PCIINTxRoute route; /* routing info for QEMU bypass */
    uint32_t mmap_timeout; /* delay to re-enable mmaps after interrupt */
    QEMUTimer *mmap_timer; /* enable mmaps after periods w/o interrupts */
} VFIOINTx;
typedef struct VFIOMSIVector {
    EventNotifier interrupt; /* eventfd triggered on interrupt */
    struct VFIODevice *vdev; /* back pointer to device */
    MSIMessage msg; /* cache the MSI message so we know when it changes */
    int virq; /* KVM irqchip route for QEMU bypass */
    bool use;
} VFIOMSIVector;

enum {
    VFIO_INT_NONE = 0,
    VFIO_INT_INTx = 1,
    VFIO_INT_MSI  = 2,
    VFIO_INT_MSIX = 3,
};
typedef struct VFIOContainer {
    int fd; /* /dev/vfio/vfio, empowered by the attached groups */
    struct {
        /* enable abstraction to support various iommu backends */
        MemoryListener listener; /* Used by type1 iommu */
        void (*release)(struct VFIOContainer *);
    } iommu_data;
    QLIST_HEAD(, VFIOGroup) group_list;
    QLIST_ENTRY(VFIOContainer) next;
} VFIOContainer;
/* Cache of MSI-X setup plus extra mmap and memory region for split BAR map */
typedef struct VFIOMSIXInfo {
    uint8_t table_bar;
    uint8_t pba_bar;
    uint16_t entries;
    uint32_t table_offset;
    uint32_t pba_offset;
    MemoryRegion mmap_mem;
    void *mmap;
} VFIOMSIXInfo;
typedef struct VFIODevice {
    PCIDevice pdev;
    int fd; /* device fd */
    VFIOINTx intx;
    unsigned int config_size;
    uint8_t *emulated_config_bits; /* QEMU emulated bits, little-endian */
    off_t config_offset; /* Offset of config space region within device fd */
    unsigned int rom_size;
    off_t rom_offset; /* Offset of ROM region within device fd */
    void *rom;
    int msi_cap_size;
    VFIOMSIVector *msi_vectors;
    VFIOMSIXInfo *msix;
    int nr_vectors; /* Number of MSI/MSIX vectors currently in use */
    int interrupt; /* Current interrupt type */
    VFIOBAR bars[PCI_NUM_REGIONS - 1]; /* No ROM */
    VFIOVGA vga; /* 0xa0000, 0x3b0, 0x3c0 */
    PCIHostDeviceAddress host;
    QLIST_ENTRY(VFIODevice) next;
    struct VFIOGroup *group;
    EventNotifier err_notifier;
    uint32_t features;
#define VFIO_FEATURE_ENABLE_VGA_BIT 0
#define VFIO_FEATURE_ENABLE_VGA (1 << VFIO_FEATURE_ENABLE_VGA_BIT)
    bool has_vga;
} VFIODevice;
typedef struct VFIOGroup {
    int fd;
    int groupid;
    VFIOContainer *container;
    QLIST_HEAD(, VFIODevice) device_list;
    QLIST_ENTRY(VFIOGroup) next;
    QLIST_ENTRY(VFIOGroup) container_next;
} VFIOGroup;

#define MSIX_CAP_LENGTH 12
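/*
 * 12 bytes covers the entire MSI-X capability: 1-byte cap ID, 1-byte next
 * pointer, 2-byte message control, 4-byte table offset/BIR, and 4-byte
 * PBA offset/BIR.
 */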
static QLIST_HEAD(, VFIOContainer)
    container_list = QLIST_HEAD_INITIALIZER(container_list);

static QLIST_HEAD(, VFIOGroup)
    group_list = QLIST_HEAD_INITIALIZER(group_list);

static void vfio_disable_interrupts(VFIODevice *vdev);
static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len);
static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr,
                                  uint32_t val, int len);
static void vfio_mmap_set_enabled(VFIODevice *vdev, bool enabled);
/*
 * Common VFIO interrupt disable
 */
static void vfio_disable_irqindex(VFIODevice *vdev, int index)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
        .index = index,
        .start = 0,
        .count = 0,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
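/*
 * Note: DATA_NONE | ACTION_TRIGGER with count = 0 is how the VFIO uAPI
 * spells "tear down every trigger on this index".  The unmask/mask
 * helpers below use the same ioctl with ACTION_UNMASK/ACTION_MASK to
 * flip the INTx mask state without touching the trigger eventfd.
 */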
static void vfio_unmask_intx(VFIODevice *vdev)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
        .index = VFIO_PCI_INTX_IRQ_INDEX,
        .start = 0,
        .count = 1,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
#ifdef CONFIG_KVM /* Unused outside of CONFIG_KVM code */
static void vfio_mask_intx(VFIODevice *vdev)
{
    struct vfio_irq_set irq_set = {
        .argsz = sizeof(irq_set),
        .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
        .index = VFIO_PCI_INTX_IRQ_INDEX,
        .start = 0,
        .count = 1,
    };

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}
#endif
/*
 * Disabling BAR mmapping can be slow, but toggling it around INTx can
 * also be a huge overhead.  We try to get the best of both worlds by
 * waiting until an interrupt to disable mmaps (subsequent transitions
 * to the same state are effectively no overhead).  If the interrupt has
 * been serviced and the time gap is long enough, we re-enable mmaps for
 * performance.  This works well for things like graphics cards, which
 * may not use their interrupt at all and are penalized to an unusable
 * level by read/write BAR traps.  Other devices, like NICs, have more
 * regular interrupts and see much better latency by staying in non-mmap
 * mode.  We therefore set the default mmap_timeout such that a ping
 * is just enough to keep the mmap disabled.  Users can experiment with
 * other options with the x-intx-mmap-timeout-ms parameter (a value of
 * zero disables the timer).
 */
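/*
 * For example, assuming the usual vfio-pci command line spelling:
 *
 *   -device vfio-pci,host=02:00.0,x-intx-mmap-timeout-ms=0
 *
 * keeps the device in trapped (non-mmap) mode once an interrupt arrives,
 * since the re-enable timer is never armed.
 */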
static void vfio_intx_mmap_enable(void *opaque)
{
    VFIODevice *vdev = opaque;

    if (vdev->intx.pending) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
        return;
    }

    vfio_mmap_set_enabled(vdev, true);
}
static void vfio_intx_interrupt(void *opaque)
{
    VFIODevice *vdev = opaque;

    if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
        return;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) Pin %c\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function,
            'A' + vdev->intx.pin);

    vdev->intx.pending = true;
    pci_irq_assert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, false);
    if (vdev->intx.mmap_timeout) {
        timer_mod(vdev->intx.mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
    }
}
static void vfio_eoi(VFIODevice *vdev)
{
    if (!vdev->intx.pending) {
        return;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) EOI\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_unmask_intx(vdev);
}
static void vfio_enable_intx_kvm(VFIODevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_RESAMPLE,
    };
    struct vfio_irq_set *irq_set;
    int ret, argsz;
    int32_t *pfd;

    if (!VFIO_ALLOW_KVM_INTX || !kvm_irqfds_enabled() ||
        vdev->intx.route.mode != PCI_INTX_ENABLED ||
        !kvm_check_extension(kvm_state, KVM_CAP_IRQFD_RESAMPLE)) {
        return;
    }

    /* Get to a known interrupt state */
    qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
    vfio_mask_intx(vdev);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Get an eventfd for resample/unmask */
    if (event_notifier_init(&vdev->intx.unmask, 0)) {
        error_report("vfio: Error: event_notifier_init failed eoi");
        goto fail;
    }

    /* KVM triggers it, VFIO listens for it */
    irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);

    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to setup resample irqfd: %m");
        goto fail_irqfd;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = irqfd.resamplefd;

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx unmask fd: %m");
        goto fail_vfio;
    }

    /* Let'em rip */
    vfio_unmask_intx(vdev);

    vdev->intx.kvm_accel = true;

    DPRINTF("%s(%04x:%02x:%02x.%x) KVM INTx accel enabled\n",
            __func__, vdev->host.domain, vdev->host.bus,
            vdev->host.slot, vdev->host.function);

    return;

fail_vfio:
    irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
    kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
fail_irqfd:
    event_notifier_cleanup(&vdev->intx.unmask);
fail:
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
    vfio_unmask_intx(vdev);
#endif
}
static void vfio_disable_intx_kvm(VFIODevice *vdev)
{
#ifdef CONFIG_KVM
    struct kvm_irqfd irqfd = {
        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
        .gsi = vdev->intx.route.irq,
        .flags = KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (!vdev->intx.kvm_accel) {
        return;
    }

    /*
     * Get to a known state, hardware masked, QEMU ready to accept new
     * interrupts, QEMU IRQ de-asserted.
     */
    vfio_mask_intx(vdev);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);

    /* Tell KVM to stop listening for an INTx irqfd */
    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
        error_report("vfio: Error: Failed to disable INTx irqfd: %m");
    }

    /* We only need to close the eventfd for VFIO to cleanup the kernel side */
    event_notifier_cleanup(&vdev->intx.unmask);

    /* QEMU starts listening for interrupt events. */
    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);

    vdev->intx.kvm_accel = false;

    /* If we've missed an event, let it re-fire through QEMU */
    vfio_unmask_intx(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x) KVM INTx accel disabled\n",
            __func__, vdev->host.domain, vdev->host.bus,
            vdev->host.slot, vdev->host.function);
#endif
}
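/*
 * Note the teardown ordering above: the line is masked and the KVM irqfd
 * deassigned before the unmask eventfd is closed and QEMU resumes
 * listening, so no resample can sneak in mid-switch.
 */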
static void vfio_update_irq(PCIDevice *pdev)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    PCIINTxRoute route;

    if (vdev->interrupt != VFIO_INT_INTx) {
        return;
    }

    route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);

    if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
        return; /* Nothing changed */
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) IRQ moved %d -> %d\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, vdev->intx.route.irq, route.irq);

    vfio_disable_intx_kvm(vdev);

    vdev->intx.route = route;

    if (route.mode != PCI_INTX_ENABLED) {
        return;
    }

    vfio_enable_intx_kvm(vdev);

    /* Re-enable the interrupt in case we missed an EOI */
    vfio_eoi(vdev);
}
static int vfio_enable_intx(VFIODevice *vdev)
{
    uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
    struct vfio_irq_set *irq_set;
    int ret, argsz;
    int32_t *pfd;

    if (!pin) {
        return 0;
    }

    vfio_disable_interrupts(vdev);

    vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
    pci_config_set_interrupt_pin(vdev->pdev.config, pin);

#ifdef CONFIG_KVM
    /*
     * Only conditional to avoid generating error messages on platforms
     * where we won't actually use the result anyway.
     */
    if (kvm_irqfds_enabled() &&
        kvm_check_extension(kvm_state, KVM_CAP_IRQFD_RESAMPLE)) {
        vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
                                                        vdev->intx.pin);
    }
#endif

    ret = event_notifier_init(&vdev->intx.interrupt, 0);
    if (ret) {
        error_report("vfio: Error: event_notifier_init failed");
        return ret;
    }

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;

    *pfd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(*pfd, vfio_intx_interrupt, NULL, vdev);

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    if (ret) {
        error_report("vfio: Error: Failed to setup INTx fd: %m");
        qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
        g_free(irq_set);
        event_notifier_cleanup(&vdev->intx.interrupt);
        return -errno;
    }
    g_free(irq_set);

    vfio_enable_intx_kvm(vdev);

    vdev->interrupt = VFIO_INT_INTx;

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);

    return 0;
}
static void vfio_disable_intx(VFIODevice *vdev)
{
    int fd;

    timer_del(vdev->intx.mmap_timer);
    vfio_disable_intx_kvm(vdev);
    vfio_disable_irqindex(vdev, VFIO_PCI_INTX_IRQ_INDEX);
    vdev->intx.pending = false;
    pci_irq_deassert(&vdev->pdev);
    vfio_mmap_set_enabled(vdev, true);

    fd = event_notifier_get_fd(&vdev->intx.interrupt);
    qemu_set_fd_handler(fd, NULL, NULL, vdev);
    event_notifier_cleanup(&vdev->intx.interrupt);

    vdev->interrupt = VFIO_INT_NONE;

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}
/*
 * MSI/X
 */
static void vfio_msi_interrupt(void *opaque)
{
    VFIOMSIVector *vector = opaque;
    VFIODevice *vdev = vector->vdev;
    int nr = vector - vdev->msi_vectors;

    if (!event_notifier_test_and_clear(&vector->interrupt)) {
        return;
    }

    DPRINTF("%s(%04x:%02x:%02x.%x) vector %d\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, nr);

    if (vdev->interrupt == VFIO_INT_MSIX) {
        msix_notify(&vdev->pdev, nr);
    } else if (vdev->interrupt == VFIO_INT_MSI) {
        msi_notify(&vdev->pdev, nr);
    } else {
        error_report("vfio: MSI interrupt received, but not enabled?");
    }
}
static int vfio_enable_vectors(VFIODevice *vdev, bool msix)
{
    struct vfio_irq_set *irq_set;
    int ret = 0, i, argsz;
    int32_t *fds;

    argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
    irq_set->start = 0;
    irq_set->count = vdev->nr_vectors;
    fds = (int32_t *)&irq_set->data;

    for (i = 0; i < vdev->nr_vectors; i++) {
        if (!vdev->msi_vectors[i].use) {
            fds[i] = -1; /* unused vectors stay untriggered in the kernel */
            continue;
        }

        fds[i] = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
    }

    ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    return ret;
}
static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
                                   MSIMessage *msg, IOHandler *handler)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOMSIVector *vector;
    int ret;

    DPRINTF("%s(%04x:%02x:%02x.%x) vector %d used\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, nr);

    vector = &vdev->msi_vectors[nr];
    vector->vdev = vdev;
    vector->use = true;

    msix_vector_use(pdev, nr);

    if (event_notifier_init(&vector->interrupt, 0)) {
        error_report("vfio: Error: event_notifier_init failed");
    }

    /*
     * Attempt to enable route through KVM irqchip,
     * default to userspace handling if unavailable.
     */
    vector->virq = msg ? kvm_irqchip_add_msi_route(kvm_state, *msg) : -1;
    if (vector->virq < 0 ||
        kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->interrupt,
                                       NULL, vector->virq) < 0) {
        if (vector->virq >= 0) {
            kvm_irqchip_release_virq(kvm_state, vector->virq);
            vector->virq = -1;
        }
        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            handler, NULL, vector);
    }

    /*
     * We don't want to have the host allocate all possible MSI vectors
     * for a device if they're not in use, so we shut them down and
     * incrementally increase them as needed.
     */
    if (vdev->nr_vectors < nr + 1) {
        vfio_disable_irqindex(vdev, VFIO_PCI_MSIX_IRQ_INDEX);
        vdev->nr_vectors = nr + 1;
        ret = vfio_enable_vectors(vdev, true);
        if (ret) {
            error_report("vfio: failed to enable vectors, %d", ret);
        }
    } else {
        /* bind the eventfd for just this vector */
        struct vfio_irq_set *irq_set;
        int argsz;
        int32_t *pfd;

        argsz = sizeof(*irq_set) + sizeof(*pfd);

        irq_set = g_malloc0(argsz);
        irq_set->argsz = argsz;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                         VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = nr;
        irq_set->count = 1;
        pfd = (int32_t *)&irq_set->data;

        *pfd = event_notifier_get_fd(&vector->interrupt);

        ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
        g_free(irq_set);
        if (ret) {
            error_report("vfio: failed to modify vector, %d", ret);
        }
    }

    return 0;
}

static int vfio_msix_vector_use(PCIDevice *pdev,
                                unsigned int nr, MSIMessage msg)
{
    return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
}
static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    VFIOMSIVector *vector = &vdev->msi_vectors[nr];
    struct vfio_irq_set *irq_set;
    int argsz;
    int32_t *pfd;

    DPRINTF("%s(%04x:%02x:%02x.%x) vector %d released\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, nr);

    /*
     * XXX What's the right thing to do here?  This turns off the interrupt
     * completely, but do we really just want to switch the interrupt to
     * bouncing through userspace and let msix.c drop it?  Not sure.
     */
    msix_vector_unuse(pdev, nr);

    argsz = sizeof(*irq_set) + sizeof(*pfd);

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
                     VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
    irq_set->start = nr;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = -1; /* an eventfd of -1 tears down this one trigger */

    ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);

    g_free(irq_set);

    if (vector->virq < 0) {
        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                            NULL, NULL, NULL);
    } else {
        kvm_irqchip_remove_irqfd_notifier(kvm_state, &vector->interrupt,
                                          vector->virq);
        kvm_irqchip_release_virq(kvm_state, vector->virq);
        vector->virq = -1;
    }

    event_notifier_cleanup(&vector->interrupt);
    vector->use = false;
}
static void vfio_enable_msix(VFIODevice *vdev)
{
    vfio_disable_interrupts(vdev);

    vdev->msi_vectors = g_malloc0(vdev->msix->entries * sizeof(VFIOMSIVector));

    vdev->interrupt = VFIO_INT_MSIX;

    /*
     * Some communication channels between VF & PF or PF & fw rely on the
     * physical state of the device and expect that enabling MSI-X from the
     * guest enables the same on the host.  When our guest is Linux, the
     * guest driver call to pci_enable_msix() sets the enabling bit in the
     * MSI-X capability, but leaves the vector table masked.  We therefore
     * can't rely on a vector_use callback (from request_irq() in the guest)
     * to switch the physical device into MSI-X mode because that may come a
     * long time after pci_enable_msix().  This code enables vector 0 with
     * triggering to userspace, then immediately releases the vector, leaving
     * the physical device with no vectors enabled, but MSI-X enabled, just
     * like the guest view.
     */
    vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
    vfio_msix_vector_release(&vdev->pdev, 0);

    if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
                                  vfio_msix_vector_release, NULL)) {
        error_report("vfio: msix_set_vector_notifiers failed");
    }

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}
static void vfio_enable_msi(VFIODevice *vdev)
{
    int ret, i;

    vfio_disable_interrupts(vdev);

    vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
retry:
    vdev->msi_vectors = g_malloc0(vdev->nr_vectors * sizeof(VFIOMSIVector));

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];

        vector->vdev = vdev;
        vector->use = true;

        if (event_notifier_init(&vector->interrupt, 0)) {
            error_report("vfio: Error: event_notifier_init failed");
        }

        vector->msg = msi_get_message(&vdev->pdev, i);

        /*
         * Attempt to enable route through KVM irqchip,
         * default to userspace handling if unavailable.
         */
        vector->virq = kvm_irqchip_add_msi_route(kvm_state, vector->msg);
        if (vector->virq < 0 ||
            kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->interrupt,
                                           NULL, vector->virq) < 0) {
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                vfio_msi_interrupt, NULL, vector);
        }
    }

    ret = vfio_enable_vectors(vdev, false);
    if (ret) {
        if (ret < 0) {
            error_report("vfio: Error: Failed to setup MSI fds: %m");
        } else if (ret != vdev->nr_vectors) {
            error_report("vfio: Error: Failed to enable %d "
                         "MSI vectors, retry with %d", vdev->nr_vectors, ret);
        }

        for (i = 0; i < vdev->nr_vectors; i++) {
            VFIOMSIVector *vector = &vdev->msi_vectors[i];
            if (vector->virq >= 0) {
                kvm_irqchip_remove_irqfd_notifier(kvm_state, &vector->interrupt,
                                                  vector->virq);
                kvm_irqchip_release_virq(kvm_state, vector->virq);
                vector->virq = -1;
            } else {
                qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                    NULL, NULL, NULL);
            }
            event_notifier_cleanup(&vector->interrupt);
        }

        g_free(vdev->msi_vectors);

        if (ret > 0 && ret != vdev->nr_vectors) {
            vdev->nr_vectors = ret;
            goto retry;
        }
        vdev->nr_vectors = 0;

        return;
    }

    vdev->interrupt = VFIO_INT_MSI;

    DPRINTF("%s(%04x:%02x:%02x.%x) Enabled %d MSI vectors\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, vdev->nr_vectors);
}
static void vfio_disable_msi_common(VFIODevice *vdev)
{
    g_free(vdev->msi_vectors);
    vdev->msi_vectors = NULL;
    vdev->nr_vectors = 0;
    vdev->interrupt = VFIO_INT_NONE;

    /* Fall back to INTx if the device has a pin */
    vfio_enable_intx(vdev);
}
static void vfio_disable_msix(VFIODevice *vdev)
{
    int i;

    msix_unset_vector_notifiers(&vdev->pdev);

    /*
     * MSI-X will only release vectors if MSI-X is still enabled on the
     * device, check through the rest and release it ourselves if necessary.
     */
    for (i = 0; i < vdev->nr_vectors; i++) {
        if (vdev->msi_vectors[i].use) {
            vfio_msix_vector_release(&vdev->pdev, i);
        }
    }

    if (vdev->nr_vectors) {
        vfio_disable_irqindex(vdev, VFIO_PCI_MSIX_IRQ_INDEX);
    }

    vfio_disable_msi_common(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}
static void vfio_disable_msi(VFIODevice *vdev)
{
    int i;

    vfio_disable_irqindex(vdev, VFIO_PCI_MSI_IRQ_INDEX);

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];

        if (!vector->use) {
            continue;
        }

        if (vector->virq >= 0) {
            kvm_irqchip_remove_irqfd_notifier(kvm_state,
                                              &vector->interrupt, vector->virq);
            kvm_irqchip_release_virq(kvm_state, vector->virq);
            vector->virq = -1;
        } else {
            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
                                NULL, NULL, NULL);
        }

        event_notifier_cleanup(&vector->interrupt);
    }

    vfio_disable_msi_common(vdev);

    DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
}
static void vfio_update_msi(VFIODevice *vdev)
{
    int i;

    for (i = 0; i < vdev->nr_vectors; i++) {
        VFIOMSIVector *vector = &vdev->msi_vectors[i];
        MSIMessage msg;

        if (!vector->use || vector->virq < 0) {
            continue;
        }

        msg = msi_get_message(&vdev->pdev, i);

        if (msg.address != vector->msg.address ||
            msg.data != vector->msg.data) {

            DPRINTF("%s(%04x:%02x:%02x.%x) MSI vector %d changed\n",
                    __func__, vdev->host.domain, vdev->host.bus,
                    vdev->host.slot, vdev->host.function, i);

            kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg);
            vector->msg = msg;
        }
    }
}
/*
 * IO Port/MMIO - Beware of the endians, VFIO is always little endian
 */
static void vfio_bar_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
    VFIOBAR *bar = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes\n", size);
        break;
    }

    if (pwrite(bar->fd, &buf, size, bar->fd_offset + addr) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, addr, data, size);
    }

#ifdef DEBUG_VFIO
    {
        VFIODevice *vdev = container_of(bar, VFIODevice, bars[bar->nr]);

        DPRINTF("%s(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", 0x%"PRIx64
                ", %d)\n", __func__, vdev->host.domain, vdev->host.bus,
                vdev->host.slot, vdev->host.function, bar->nr, addr,
                data, size);
    }
#endif

    /*
     * A read or write to a BAR always signals an INTx EOI.  This will
     * do nothing if not pending (including not in INTx mode).  We assume
     * that a BAR access is in response to an interrupt and that BAR
     * accesses will service the interrupt.  Unfortunately, we don't know
     * which access will service the interrupt, so we're potentially
     * getting quite a few host interrupts per guest interrupt.
     */
    vfio_eoi(container_of(bar, VFIODevice, bars[bar->nr]));
}
static uint64_t vfio_bar_read(void *opaque,
                              hwaddr addr, unsigned size)
{
    VFIOBAR *bar = opaque;
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;

    if (pread(bar->fd, &buf, size, bar->fd_offset + addr) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

#ifdef DEBUG_VFIO
    {
        VFIODevice *vdev = container_of(bar, VFIODevice, bars[bar->nr]);

        DPRINTF("%s(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx
                ", %d) = 0x%"PRIx64"\n", __func__, vdev->host.domain,
                vdev->host.bus, vdev->host.slot, vdev->host.function,
                bar->nr, addr, size, data);
    }
#endif

    /* Same as write above */
    vfio_eoi(container_of(bar, VFIODevice, bars[bar->nr]));

    return data;
}

static const MemoryRegionOps vfio_bar_ops = {
    .read = vfio_bar_read,
    .write = vfio_bar_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
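/*
 * A minimal sketch of how these ops get wired up by the BAR mapping code
 * (elsewhere in this file, not part of this excerpt):
 *
 *   memory_region_init_io(&bar->mem, OBJECT(vdev), &vfio_bar_ops,
 *                         bar, "vfio-bar", size);
 *
 * Guest accesses that miss the faster mmap_mem subregion fall through to
 * these read/write trap handlers.
 */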
static void vfio_pci_load_rom(VFIODevice *vdev)
{
    struct vfio_region_info reg_info = {
        .argsz = sizeof(reg_info),
        .index = VFIO_PCI_ROM_REGION_INDEX
    };
    uint64_t size;
    off_t off = 0;
    ssize_t bytes;

    if (ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
        error_report("vfio: Error getting ROM info: %m");
        return;
    }

    DPRINTF("Device %04x:%02x:%02x.%x ROM:\n", vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function);
    DPRINTF("  size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
            (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
            (unsigned long)reg_info.flags);

    vdev->rom_size = size = reg_info.size;
    vdev->rom_offset = reg_info.offset;

    if (!vdev->rom_size) {
        return;
    }

    vdev->rom = g_malloc(size);
    memset(vdev->rom, 0xff, size);

    while (size) {
        bytes = pread(vdev->fd, vdev->rom + off, size, vdev->rom_offset + off);
        if (bytes == 0) {
            break;
        } else if (bytes > 0) {
            off += bytes;
            size -= bytes;
        } else {
            if (errno == EINTR || errno == EAGAIN) {
                continue;
            }
            error_report("vfio: Error reading device ROM: %m");
            break;
        }
    }
}
static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIODevice *vdev = opaque;
    uint64_t val = ((uint64_t)1 << (size * 8)) - 1;

    /* Load the ROM lazily when the guest tries to read it */
    if (unlikely(!vdev->rom)) {
        vfio_pci_load_rom(vdev);
    }

    memcpy(&val, vdev->rom + addr,
           (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0);

    DPRINTF("%s(%04x:%02x:%02x.%x, 0x%"HWADDR_PRIx", 0x%x) = 0x%"PRIx64"\n",
            __func__, vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, addr, size, val);

    return val;
}

static void vfio_rom_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
    /* writes to the virtual ROM are silently dropped */
}

static const MemoryRegionOps vfio_rom_ops = {
    .read = vfio_rom_read,
    .write = vfio_rom_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static void vfio_pci_size_rom(VFIODevice *vdev)
{
    uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK);
    off_t offset = vdev->config_offset + PCI_ROM_ADDRESS;
    char name[32];

    if (vdev->pdev.romfile || !vdev->pdev.rom_bar) {
        return;
    }

    /*
     * Use the same size ROM BAR as the physical device.  The contents
     * will get filled in later when the guest tries to read it.
     */
    if (pread(vdev->fd, &orig, 4, offset) != 4 ||
        pwrite(vdev->fd, &size, 4, offset) != 4 ||
        pread(vdev->fd, &size, 4, offset) != 4 ||
        pwrite(vdev->fd, &orig, 4, offset) != 4) {
        error_report("%s(%04x:%02x:%02x.%x) failed: %m",
                     __func__, vdev->host.domain, vdev->host.bus,
                     vdev->host.slot, vdev->host.function);
        return;
    }

    size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1;

    if (!size) {
        return;
    }

    DPRINTF("%04x:%02x:%02x.%x ROM size 0x%x\n", vdev->host.domain,
            vdev->host.bus, vdev->host.slot, vdev->host.function, size);

    snprintf(name, sizeof(name), "vfio[%04x:%02x:%02x.%x].rom",
             vdev->host.domain, vdev->host.bus, vdev->host.slot,
             vdev->host.function);

    memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev),
                          &vfio_rom_ops, vdev, name, size);

    pci_register_bar(&vdev->pdev, PCI_ROM_SLOT,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom);

    vdev->pdev.has_rom = true;
}
static void vfio_vga_write(void *opaque, hwaddr addr,
                           uint64_t data, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    off_t offset = vga->fd_offset + region->offset + addr;

    switch (size) {
    case 1:
        buf.byte = data;
        break;
    case 2:
        buf.word = cpu_to_le16(data);
        break;
    case 4:
        buf.dword = cpu_to_le32(data);
        break;
    default:
        hw_error("vfio: unsupported write size, %d bytes\n", size);
        break;
    }

    if (pwrite(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
                     __func__, region->offset + addr, data, size);
    }

    DPRINTF("%s(0x%"HWADDR_PRIx", 0x%"PRIx64", %d)\n",
            __func__, region->offset + addr, data, size);
}
static uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
{
    VFIOVGARegion *region = opaque;
    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
    union {
        uint8_t byte;
        uint16_t word;
        uint32_t dword;
        uint64_t qword;
    } buf;
    uint64_t data = 0;
    off_t offset = vga->fd_offset + region->offset + addr;

    if (pread(vga->fd, &buf, size, offset) != size) {
        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
                     __func__, region->offset + addr, size);
        return (uint64_t)-1;
    }

    switch (size) {
    case 1:
        data = buf.byte;
        break;
    case 2:
        data = le16_to_cpu(buf.word);
        break;
    case 4:
        data = le32_to_cpu(buf.dword);
        break;
    default:
        hw_error("vfio: unsupported read size, %d bytes\n", size);
        break;
    }

    DPRINTF("%s(0x%"HWADDR_PRIx", %d) = 0x%"PRIx64"\n",
            __func__, region->offset + addr, size, data);

    return data;
}

static const MemoryRegionOps vfio_vga_ops = {
    .read = vfio_vga_read,
    .write = vfio_vga_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
/*
 * Device specific quirks
 */

/* Is range1 fully contained within range2? */
static bool vfio_range_contained(uint64_t first1, uint64_t len1,
                                 uint64_t first2, uint64_t len2) {
    return (first1 >= first2 && first1 + len1 <= first2 + len2);
}
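/*
 * e.g. vfio_range_contained(0x4, 2, 0x4, 4) is true ([0x4,0x6) fits in
 * [0x4,0x8)), while vfio_range_contained(0x6, 4, 0x4, 4) is false.
 */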
static bool vfio_flags_enabled(uint8_t flags, uint8_t mask)
{
    return (mask && (flags & mask) == mask);
}
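/*
 * Note: a zero mask never matches, so a quirk only redirects accesses
 * once the corresponding read_flags/write_flags have been armed by a
 * write to its address register.
 */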
static uint64_t vfio_generic_window_quirk_read(void *opaque,
                                               hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    uint64_t data;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.read_flags) &&
        ranges_overlap(addr, size,
                       quirk->data.data_offset, quirk->data.data_size)) {
        hwaddr offset = addr - quirk->data.data_offset;

        if (!vfio_range_contained(addr, size, quirk->data.data_offset,
                                  quirk->data.data_size)) {
            hw_error("%s: window data read not fully contained: %s\n",
                     __func__, memory_region_name(&quirk->mem));
        }

        data = vfio_pci_read_config(&vdev->pdev,
                                    quirk->data.address_val + offset, size);

        DPRINTF("%s read(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", %d) = 0x%"
                PRIx64"\n", memory_region_name(&quirk->mem), vdev->host.domain,
                vdev->host.bus, vdev->host.slot, vdev->host.function,
                quirk->data.bar, addr, size, data);
    } else {
        data = vfio_bar_read(&vdev->bars[quirk->data.bar],
                             addr + quirk->data.base_offset, size);
    }

    return data;
}
static void vfio_generic_window_quirk_write(void *opaque, hwaddr addr,
                                            uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;

    if (ranges_overlap(addr, size,
                       quirk->data.address_offset, quirk->data.address_size)) {

        if (addr != quirk->data.address_offset) {
            hw_error("%s: offset write into address window: %s\n",
                     __func__, memory_region_name(&quirk->mem));
        }

        if ((data & ~quirk->data.address_mask) == quirk->data.address_match) {
            quirk->data.flags |= quirk->data.write_flags |
                                 quirk->data.read_flags;
            quirk->data.address_val = data & quirk->data.address_mask;
        } else {
            quirk->data.flags &= ~(quirk->data.write_flags |
                                   quirk->data.read_flags);
        }
    }

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.write_flags) &&
        ranges_overlap(addr, size,
                       quirk->data.data_offset, quirk->data.data_size)) {
        hwaddr offset = addr - quirk->data.data_offset;

        if (!vfio_range_contained(addr, size, quirk->data.data_offset,
                                  quirk->data.data_size)) {
            hw_error("%s: window data write not fully contained: %s\n",
                     __func__, memory_region_name(&quirk->mem));
        }

        vfio_pci_write_config(&vdev->pdev,
                              quirk->data.address_val + offset, data, size);
        DPRINTF("%s write(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", 0x%"
                PRIx64", %d)\n", memory_region_name(&quirk->mem),
                vdev->host.domain, vdev->host.bus, vdev->host.slot,
                vdev->host.function, quirk->data.bar, addr, data, size);
        return;
    }

    vfio_bar_write(&vdev->bars[quirk->data.bar],
                   addr + quirk->data.base_offset, data, size);
}

static const MemoryRegionOps vfio_generic_window_quirk = {
    .read = vfio_generic_window_quirk_read,
    .write = vfio_generic_window_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static uint64_t vfio_generic_quirk_read(void *opaque,
                                        hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK;
    hwaddr offset = quirk->data.address_match & ~TARGET_PAGE_MASK;
    uint64_t data;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.read_flags) &&
        ranges_overlap(addr, size, offset, quirk->data.address_mask + 1)) {
        if (!vfio_range_contained(addr, size, offset,
                                  quirk->data.address_mask + 1)) {
            hw_error("%s: read not fully contained: %s\n",
                     __func__, memory_region_name(&quirk->mem));
        }

        data = vfio_pci_read_config(&vdev->pdev, addr - offset, size);

        DPRINTF("%s read(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", %d) = 0x%"
                PRIx64"\n", memory_region_name(&quirk->mem), vdev->host.domain,
                vdev->host.bus, vdev->host.slot, vdev->host.function,
                quirk->data.bar, addr + base, size, data);
    } else {
        data = vfio_bar_read(&vdev->bars[quirk->data.bar], addr + base, size);
    }

    return data;
}

static void vfio_generic_quirk_write(void *opaque, hwaddr addr,
                                     uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    hwaddr base = quirk->data.address_match & TARGET_PAGE_MASK;
    hwaddr offset = quirk->data.address_match & ~TARGET_PAGE_MASK;

    if (vfio_flags_enabled(quirk->data.flags, quirk->data.write_flags) &&
        ranges_overlap(addr, size, offset, quirk->data.address_mask + 1)) {
        if (!vfio_range_contained(addr, size, offset,
                                  quirk->data.address_mask + 1)) {
            hw_error("%s: write not fully contained: %s\n",
                     __func__, memory_region_name(&quirk->mem));
        }

        vfio_pci_write_config(&vdev->pdev, addr - offset, data, size);

        DPRINTF("%s write(%04x:%02x:%02x.%x:BAR%d+0x%"HWADDR_PRIx", 0x%"
                PRIx64", %d)\n", memory_region_name(&quirk->mem),
                vdev->host.domain, vdev->host.bus, vdev->host.slot,
                vdev->host.function, quirk->data.bar, addr + base, data, size);
    } else {
        vfio_bar_write(&vdev->bars[quirk->data.bar], addr + base, data, size);
    }
}

static const MemoryRegionOps vfio_generic_quirk = {
    .read = vfio_generic_quirk_read,
    .write = vfio_generic_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
#define PCI_VENDOR_ID_ATI 0x1002

/*
 * Radeon HD cards (HD5450 & HD7850) report the upper byte of the I/O port BAR
 * through VGA register 0x3c3.  On newer cards, the I/O port BAR is always
 * BAR4 (older cards like the X550 used BAR1, but we don't care to support
 * those).  Note that on bare metal, a read of 0x3c3 doesn't always return the
 * I/O port BAR address.  Originally this was coded to return the virtual BAR
 * address only if the physical register read returns the actual BAR address,
 * but users have reported greater success if we return the virtual address
 * unconditionally.
 */
static uint64_t vfio_ati_3c3_quirk_read(void *opaque,
                                        hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    uint64_t data = vfio_pci_read_config(&vdev->pdev,
                                         PCI_BASE_ADDRESS_0 + (4 * 4) + 1,
                                         size);
    DPRINTF("%s(0x3c3, 1) = 0x%"PRIx64"\n", __func__, data);

    return data;
}

static const MemoryRegionOps vfio_ati_3c3_quirk = {
    .read = vfio_ati_3c3_quirk_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static void vfio_vga_probe_ati_3c3_quirk(VFIODevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
        return;
    }

    /*
     * As long as the BAR is >= 256 bytes it will be aligned such that the
     * lower byte is always zero.  Filter out anything else, if it exists.
     */
    if (!vdev->bars[4].ioport || vdev->bars[4].size < 256) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_ati_3c3_quirk, quirk,
                          "vfio-ati-3c3-quirk", 1);
    memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
                                3 /* offset 3 bytes from 0x3c0 */, &quirk->mem);

    QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks,
                      quirk, next);

    DPRINTF("Enabled ATI/AMD quirk 0x3c3 BAR4 for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * Newer ATI/AMD devices, including HD5450 and HD7850, have a window to PCI
 * config space through MMIO BAR2 at offset 0x4000.  Nothing seems to access
 * the MMIO space directly, but a window to this space is provided through
 * I/O port BAR4.  Offset 0x0 is the address register and offset 0x4 is the
 * data register.  When the address is programmed to a range of 0x4000-0x4fff
 * PCI configuration space is available.  Experimentation seems to indicate
 * that only read-only access is provided, but we drop writes when the window
 * is enabled to config space nonetheless.
 */
static void vfio_probe_ati_bar4_window_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 4 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.address_size = 4;
    quirk->data.data_offset = 4;
    quirk->data.data_size = 4;
    quirk->data.address_match = 0x4000;
    quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1;
    quirk->data.bar = nr;
    quirk->data.read_flags = quirk->data.write_flags = 1;

    memory_region_init_io(&quirk->mem, OBJECT(vdev),
                          &vfio_generic_window_quirk, quirk,
                          "vfio-ati-bar4-window-quirk", 8);
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem,
                                        quirk->data.base_offset, &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled ATI/AMD BAR4 window quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * Trap the BAR2 MMIO window to config space as well.
 */
static void vfio_probe_ati_bar2_4000_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    /* Only enable on newer devices where BAR2 is 64bit */
    if (!vdev->has_vga || nr != 2 || !vdev->bars[2].mem64 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_ATI) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1;
    quirk->data.address_match = 0x4000;
    quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_generic_quirk, quirk,
                          "vfio-ati-bar2-4000-quirk",
                          TARGET_PAGE_ALIGN(quirk->data.address_mask + 1));
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem,
                                        quirk->data.address_match & TARGET_PAGE_MASK,
                                        &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled ATI/AMD BAR2 0x4000 quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * Older ATI/AMD cards like the X550 have a similar window to that above.
 * I/O port BAR1 provides a window to a mirror of PCI config space located
 * in BAR2 at offset 0xf00.  We don't care to support such older cards, but
 * note it for future reference.
 */

#define PCI_VENDOR_ID_NVIDIA 0x10de

/*
 * Nvidia has several different methods to get to config space, the
 * nouveau project has several of these documented here:
 * https://github.com/pathscale/envytools/tree/master/hwdocs
 *
 * The first quirk is actually not documented in envytools and is found
 * on 10de:01d1 (NVIDIA Corporation G72 [GeForce 7300 LE]).  This is an
 * NV46 chipset.  The backdoor uses the legacy VGA I/O ports to access
 * the mirror of PCI config space found at BAR0 offset 0x1800.  The access
 * sequence first writes 0x338 to I/O port 0x3d4.  The target offset is
 * then written to 0x3d0.  Finally 0x538 is written for a read and 0x738
 * is written for a write to 0x3d4.  The BAR0 offset is then accessible
 * through 0x3d0.  This quirk doesn't seem to be necessary on newer cards
 * that use the I/O port BAR5 window but it doesn't hurt to leave it.
 */
enum {
    NV_3D0_NONE = 0,
    NV_3D0_SELECT,
    NV_3D0_WINDOW,
    NV_3D0_READ,
    NV_3D0_WRITE,
};
static uint64_t vfio_nvidia_3d0_quirk_read(void *opaque,
                                           hwaddr addr, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    PCIDevice *pdev = &vdev->pdev;
    uint64_t data = vfio_vga_read(&vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                                  addr + quirk->data.base_offset, size);

    if (quirk->data.flags == NV_3D0_READ && addr == quirk->data.data_offset) {
        data = vfio_pci_read_config(pdev, quirk->data.address_val, size);
        DPRINTF("%s(0x3d0, %d) = 0x%"PRIx64"\n", __func__, size, data);
    }

    quirk->data.flags = NV_3D0_NONE;

    return data;
}
static void vfio_nvidia_3d0_quirk_write(void *opaque, hwaddr addr,
                                        uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;
    VFIODevice *vdev = quirk->vdev;
    PCIDevice *pdev = &vdev->pdev;

    switch (quirk->data.flags) {
    case NV_3D0_NONE:
        if (addr == quirk->data.address_offset && data == 0x338) {
            quirk->data.flags = NV_3D0_SELECT;
        }
        break;
    case NV_3D0_SELECT:
        quirk->data.flags = NV_3D0_NONE;
        if (addr == quirk->data.data_offset &&
            (data & ~quirk->data.address_mask) == quirk->data.address_match) {
            quirk->data.flags = NV_3D0_WINDOW;
            quirk->data.address_val = data & quirk->data.address_mask;
        }
        break;
    case NV_3D0_WINDOW:
        quirk->data.flags = NV_3D0_NONE;
        if (addr == quirk->data.address_offset) {
            if (data == 0x538) {
                quirk->data.flags = NV_3D0_READ;
            } else if (data == 0x738) {
                quirk->data.flags = NV_3D0_WRITE;
            }
        }
        break;
    case NV_3D0_WRITE:
        quirk->data.flags = NV_3D0_NONE;
        if (addr == quirk->data.data_offset) {
            vfio_pci_write_config(pdev, quirk->data.address_val, data, size);
            DPRINTF("%s(0x3d0, 0x%"PRIx64", %d)\n", __func__, data, size);
            return;
        }
        break;
    }

    vfio_vga_write(&vdev->vga.region[QEMU_PCI_VGA_IO_HI],
                   addr + quirk->data.base_offset, data, size);
}

static const MemoryRegionOps vfio_nvidia_3d0_quirk = {
    .read = vfio_nvidia_3d0_quirk_read,
    .write = vfio_nvidia_3d0_quirk_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static void vfio_vga_probe_nvidia_3d0_quirk(VFIODevice *vdev)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA ||
        !vdev->bars[1].size) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.base_offset = 0x10;
    quirk->data.address_offset = 4;
    quirk->data.address_size = 2;
    quirk->data.address_match = 0x1800;
    quirk->data.address_mask = PCI_CONFIG_SPACE_SIZE - 1;
    quirk->data.data_offset = 0;
    quirk->data.data_size = 4;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_nvidia_3d0_quirk,
                          quirk, "vfio-nvidia-3d0-quirk", 6);
    memory_region_add_subregion(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
                                quirk->data.base_offset, &quirk->mem);

    QLIST_INSERT_HEAD(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks,
                      quirk, next);

    DPRINTF("Enabled NVIDIA VGA 0x3d0 quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * The second quirk is documented in envytools.  The I/O port BAR5 is just
 * a set of address/data ports to the MMIO BARs.  The BAR we care about is
 * again BAR0.  This backdoor is apparently a bit newer than the one above,
 * so we need to trap not only the 256 bytes @0x1800, but all of PCI config
 * space; the extended space is available at the 4k window @0x88000.
 */
enum {
    NV_BAR5_ADDRESS = 0x1,
    NV_BAR5_ENABLE = 0x2,
    NV_BAR5_MASTER = 0x4,
    NV_BAR5_VALID = 0x7,
};
static void vfio_nvidia_bar5_window_quirk_write(void *opaque, hwaddr addr,
                                                uint64_t data, unsigned size)
{
    VFIOQuirk *quirk = opaque;

    switch (addr) {
    case 0x0:
        if (data & 0x1) {
            quirk->data.flags |= NV_BAR5_MASTER;
        } else {
            quirk->data.flags &= ~NV_BAR5_MASTER;
        }
        break;
    case 0x4:
        if (data & 0x1) {
            quirk->data.flags |= NV_BAR5_ENABLE;
        } else {
            quirk->data.flags &= ~NV_BAR5_ENABLE;
        }
        break;
    case 0x8:
        if (quirk->data.flags & NV_BAR5_MASTER) {
            if ((data & ~0xfff) == 0x88000) {
                quirk->data.flags |= NV_BAR5_ADDRESS;
                quirk->data.address_val = data & 0xfff;
            } else if ((data & ~0xff) == 0x1800) {
                quirk->data.flags |= NV_BAR5_ADDRESS;
                quirk->data.address_val = data & 0xff;
            } else {
                quirk->data.flags &= ~NV_BAR5_ADDRESS;
            }
        }
        break;
    }

    vfio_generic_window_quirk_write(opaque, addr, data, size);
}

static const MemoryRegionOps vfio_nvidia_bar5_window_quirk = {
    .read = vfio_generic_window_quirk_read,
    .write = vfio_nvidia_bar5_window_quirk_write,
    .valid.min_access_size = 4,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
static void vfio_probe_nvidia_bar5_window_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 5 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.read_flags = quirk->data.write_flags = NV_BAR5_VALID;
    quirk->data.address_offset = 0x8;
    quirk->data.address_size = 0; /* actually 4, but avoids generic code */
    quirk->data.data_offset = 0xc;
    quirk->data.data_size = 4;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev),
                          &vfio_nvidia_bar5_window_quirk, quirk,
                          "vfio-nvidia-bar5-window-quirk", 16);
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem, 0, &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled NVIDIA BAR5 window quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * Finally, BAR0 itself.  We want to redirect any accesses to either
 * 0x1800 or 0x88000 through the PCI config space access functions.
 *
 * NB - quirk at a page granularity or else they don't seem to work when
 *      BARs are mmap'd
 *
 * Here's offset 0x88000...
 */
static void vfio_probe_nvidia_bar0_88000_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 0 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) {
        return;
    }

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1;
    quirk->data.address_match = 0x88000;
    quirk->data.address_mask = PCIE_CONFIG_SPACE_SIZE - 1;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_generic_quirk,
                          quirk, "vfio-nvidia-bar0-88000-quirk",
                          TARGET_PAGE_ALIGN(quirk->data.address_mask + 1));
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem,
                                        quirk->data.address_match & TARGET_PAGE_MASK,
                                        &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled NVIDIA BAR0 0x88000 quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * And here's the same for BAR0 offset 0x1800...
 */
static void vfio_probe_nvidia_bar0_1800_quirk(VFIODevice *vdev, int nr)
{
    PCIDevice *pdev = &vdev->pdev;
    VFIOQuirk *quirk;

    if (!vdev->has_vga || nr != 0 ||
        pci_get_word(pdev->config + PCI_VENDOR_ID) != PCI_VENDOR_ID_NVIDIA) {
        return;
    }

    /* Log the chipset ID */
    DPRINTF("Nvidia NV%02x\n",
            (unsigned int)(vfio_bar_read(&vdev->bars[0], 0, 4) >> 20) & 0xff);

    quirk = g_malloc0(sizeof(*quirk));
    quirk->vdev = vdev;
    quirk->data.flags = quirk->data.read_flags = quirk->data.write_flags = 1;
    quirk->data.address_match = 0x1800;
    quirk->data.address_mask = PCI_CONFIG_SPACE_SIZE - 1;
    quirk->data.bar = nr;

    memory_region_init_io(&quirk->mem, OBJECT(vdev), &vfio_generic_quirk, quirk,
                          "vfio-nvidia-bar0-1800-quirk",
                          TARGET_PAGE_ALIGN(quirk->data.address_mask + 1));
    memory_region_add_subregion_overlap(&vdev->bars[nr].mem,
                                        quirk->data.address_match & TARGET_PAGE_MASK,
                                        &quirk->mem, 1);

    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);

    DPRINTF("Enabled NVIDIA BAR0 0x1800 quirk for device %04x:%02x:%02x.%x\n",
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function);
}
/*
 * TODO - Some Nvidia devices provide config access to their companion HDA
 * device and even to their parent bridge via these config space mirrors.
 * Add quirks for those regions.
 */

/*
 * Common quirk probe entry points.
 */
static void vfio_vga_quirk_setup(VFIODevice *vdev)
{
    vfio_vga_probe_ati_3c3_quirk(vdev);
    vfio_vga_probe_nvidia_3d0_quirk(vdev);
}
static void vfio_vga_quirk_teardown(VFIODevice *vdev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(vdev->vga.region); i++) {
        while (!QLIST_EMPTY(&vdev->vga.region[i].quirks)) {
            VFIOQuirk *quirk = QLIST_FIRST(&vdev->vga.region[i].quirks);
            memory_region_del_subregion(&vdev->vga.region[i].mem, &quirk->mem);
            QLIST_REMOVE(quirk, next);
            g_free(quirk);
        }
    }
}

static void vfio_bar_quirk_setup(VFIODevice *vdev, int nr)
{
    vfio_probe_ati_bar4_window_quirk(vdev, nr);
    vfio_probe_ati_bar2_4000_quirk(vdev, nr);
    vfio_probe_nvidia_bar5_window_quirk(vdev, nr);
    vfio_probe_nvidia_bar0_88000_quirk(vdev, nr);
    vfio_probe_nvidia_bar0_1800_quirk(vdev, nr);
}

static void vfio_bar_quirk_teardown(VFIODevice *vdev, int nr)
{
    VFIOBAR *bar = &vdev->bars[nr];

    while (!QLIST_EMPTY(&bar->quirks)) {
        VFIOQuirk *quirk = QLIST_FIRST(&bar->quirks);
        memory_region_del_subregion(&bar->mem, &quirk->mem);
        QLIST_REMOVE(quirk, next);
        g_free(quirk);
    }
}
/*
 * PCI config space
 */
static uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;

    memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
    emu_bits = le32_to_cpu(emu_bits);

    if (emu_bits) {
        emu_val = pci_default_read_config(pdev, addr, len);
    }

    if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
        ssize_t ret;

        ret = pread(vdev->fd, &phys_val, len, vdev->config_offset + addr);
        if (ret != len) {
            error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x) failed: %m",
                         __func__, vdev->host.domain, vdev->host.bus,
                         vdev->host.slot, vdev->host.function, addr, len);
            return -errno;
        }
        phys_val = le32_to_cpu(phys_val);
    }

    val = (emu_val & emu_bits) | (phys_val & ~emu_bits);

    DPRINTF("%s(%04x:%02x:%02x.%x, @0x%x, len=0x%x) %x\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, addr, len, val);

    return val;
}
static void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr,
                                  uint32_t val, int len)
{
    VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
    uint32_t val_le = cpu_to_le32(val);

    DPRINTF("%s(%04x:%02x:%02x.%x, @0x%x, 0x%x, len=0x%x)\n", __func__,
            vdev->host.domain, vdev->host.bus, vdev->host.slot,
            vdev->host.function, addr, val, len);

    /* Write everything to VFIO, let it filter out what we can't write */
    if (pwrite(vdev->fd, &val_le, len, vdev->config_offset + addr) != len) {
        error_report("%s(%04x:%02x:%02x.%x, 0x%x, 0x%x, 0x%x) failed: %m",
                     __func__, vdev->host.domain, vdev->host.bus,
                     vdev->host.slot, vdev->host.function, addr, val, len);
    }

    /* MSI/MSI-X Enabling/Disabling */
    if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
        ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
        int is_enabled, was_enabled = msi_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msi_enabled(pdev);

        if (!was_enabled) {
            if (is_enabled) {
                vfio_enable_msi(vdev);
            }
        } else {
            if (!is_enabled) {
                vfio_disable_msi(vdev);
            } else {
                vfio_update_msi(vdev);
            }
        }
    } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
               ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
        int is_enabled, was_enabled = msix_enabled(pdev);

        pci_default_write_config(pdev, addr, val, len);

        is_enabled = msix_enabled(pdev);

        if (!was_enabled && is_enabled) {
            vfio_enable_msix(vdev);
        } else if (was_enabled && !is_enabled) {
            vfio_disable_msix(vdev);
        }
    } else {
        /* Write everything to QEMU to keep emulated bits correct */
        pci_default_write_config(pdev, addr, val, len);
    }
}
/*
 * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
 */
static int vfio_dma_unmap(VFIOContainer *container,
                          hwaddr iova, ram_addr_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = 0,
        .iova = iova,
        .size = size,
    };

    if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
        DPRINTF("VFIO_UNMAP_DMA: %d\n", -errno);
        return -errno;
    }

    return 0;
}
static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
                        ram_addr_t size, void *vaddr, bool readonly)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_READ,
        .vaddr = (__u64)(uintptr_t)vaddr,
        .iova = iova,
        .size = size,
    };

    if (!readonly) {
        map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
    }

    /*
     * Try the mapping, if it fails with EBUSY, unmap the region and try
     * again.  This shouldn't be necessary, but we sometimes see it in
     * the VGA ROM space.
     */
    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
        (errno == EBUSY && vfio_dma_unmap(container, iova, size) == 0 &&
         ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
        return 0;
    }

    DPRINTF("VFIO_MAP_DMA: %d\n", -errno);
    return -errno;
}
static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
    return !memory_region_is_ram(section->mr);
}
2093 MemoryRegionSection *section)
2095 VFIOContainer *container = container_of(listener, VFIOContainer,
2096 iommu_data.listener);
2101 assert(!memory_region_is_iommu(section->mr));
2103 if (vfio_listener_skipped_section(section)) {
2104 DPRINTF("SKIPPING region_add %"HWADDR_PRIx" - %"PRIx64"\n",
2105 section->offset_within_address_space,
2106 section->offset_within_address_space +
2107 int128_get64(int128_sub(section->size, int128_one())));
2111 if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
2112 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
2113 error_report("%s received unaligned region", __func__);
2117 iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
2118 end = (section->offset_within_address_space + int128_get64(section->size)) &
2125 vaddr = memory_region_get_ram_ptr(section->mr) +
2126 section->offset_within_region +
2127 (iova - section->offset_within_address_space);
2129 DPRINTF("region_add %"HWADDR_PRIx" - %"HWADDR_PRIx" [%p]\n",
2130 iova, end - 1, vaddr);
2132 memory_region_ref(section->mr);
2133 ret = vfio_dma_map(container, iova, end - iova, vaddr, section->readonly);
2135 error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
2136 "0x%"HWADDR_PRIx", %p) = %d (%m)",
2137 container, iova, end - iova, vaddr, ret);
static void vfio_listener_region_del(MemoryListener *listener,
                                     MemoryRegionSection *section)
{
    VFIOContainer *container = container_of(listener, VFIOContainer,
                                            iommu_data.listener);
    hwaddr iova, end;
    int ret;

    if (vfio_listener_skipped_section(section)) {
        DPRINTF("SKIPPING region_del %"HWADDR_PRIx" - %"PRIx64"\n",
                section->offset_within_address_space,
                section->offset_within_address_space +
                int128_get64(int128_sub(section->size, int128_one())));
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    end = (section->offset_within_address_space + int128_get64(section->size)) &
          TARGET_PAGE_MASK;

    if (iova >= end) {
        return;
    }

    DPRINTF("region_del %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
            iova, end - 1);

    ret = vfio_dma_unmap(container, iova, end - iova);
    memory_region_unref(section->mr);
    if (ret) {
        error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
                     "0x%"HWADDR_PRIx") = %d (%m)",
                     container, iova, end - iova, ret);
    }
}
2183 static MemoryListener vfio_memory_listener = {
2184 .region_add = vfio_listener_region_add,
2185 .region_del = vfio_listener_region_del,
2188 static void vfio_listener_release(VFIOContainer *container)
2190 memory_listener_unregister(&container->iommu_data.listener);
2196 static void vfio_disable_interrupts(VFIODevice *vdev)
2198 switch (vdev->interrupt) {
2200 vfio_disable_intx(vdev);
2203 vfio_disable_msi(vdev);
2206 vfio_disable_msix(vdev);
2211 static int vfio_setup_msi(VFIODevice *vdev, int pos)
2214 bool msi_64bit, msi_maskbit;
2217 if (pread(vdev->fd, &ctrl, sizeof(ctrl),
2218 vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
2221 ctrl = le16_to_cpu(ctrl);
2223 msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
2224 msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
2225 entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);
2227 DPRINTF("%04x:%02x:%02x.%x PCI MSI CAP @0x%x\n", vdev->host.domain,
2228 vdev->host.bus, vdev->host.slot, vdev->host.function, pos);
2230 ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit);
2232 if (ret == -ENOTSUP) {
2235 error_report("vfio: msi_init failed");
2238 vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);
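/* e.g. a 64-bit capability with per-vector masking spans
 * 0xa + 0xa + 0x4 = 0x18 bytes of config space. */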
2244 * We don't have any control over how pci_add_capability() inserts
2245 * capabilities into the chain. In order to set up MSI-X we need a
2246 * MemoryRegion for the BAR. In order to set up the BAR and not
2247 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
2248 * need to first look for where the MSI-X table lives. So we
2249 * unfortunately split MSI-X setup across two functions.
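/*
 * For reference, the MSI-X capability layout parsed below (per the
 * PCI spec):
 *
 *   cap + 0x0: ID / next capability pointer
 *   cap + 0x2: Message Control (table size - 1 in bits 0-10)
 *   cap + 0x4: Table Offset / BIR (bits 0-2 select the BAR)
 *   cap + 0x8: PBA Offset / BIR
 */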
2251 static int vfio_early_setup_msix(VFIODevice *vdev)
2255 uint32_t table, pba;
2257 pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
2262 if (pread(vdev->fd, &ctrl, sizeof(ctrl),
2263 vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
2267 if (pread(vdev->fd, &table, sizeof(table),
2268 vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
2272 if (pread(vdev->fd, &pba, sizeof(pba),
2273 vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
2277 ctrl = le16_to_cpu(ctrl);
2278 table = le32_to_cpu(table);
2279 pba = le32_to_cpu(pba);
2281 vdev->msix = g_malloc0(sizeof(*(vdev->msix)));
2282 vdev->msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
2283 vdev->msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
2284 vdev->msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
2285 vdev->msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
2286 vdev->msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
2288 DPRINTF("%04x:%02x:%02x.%x "
2289 "PCI MSI-X CAP @0x%x, BAR %d, offset 0x%x, entries %d\n",
2290 vdev->host.domain, vdev->host.bus, vdev->host.slot,
2291 vdev->host.function, pos, vdev->msix->table_bar,
2292 vdev->msix->table_offset, vdev->msix->entries);
2297 static int vfio_setup_msix(VFIODevice *vdev, int pos)
2301 ret = msix_init(&vdev->pdev, vdev->msix->entries,
2302 &vdev->bars[vdev->msix->table_bar].mem,
2303 vdev->msix->table_bar, vdev->msix->table_offset,
2304 &vdev->bars[vdev->msix->pba_bar].mem,
2305 vdev->msix->pba_bar, vdev->msix->pba_offset, pos);
2307 if (ret == -ENOTSUP) {
2310 error_report("vfio: msix_init failed");
2317 static void vfio_teardown_msi(VFIODevice *vdev)
2319 msi_uninit(&vdev->pdev);
2322 msix_uninit(&vdev->pdev, &vdev->bars[vdev->msix->table_bar].mem,
2323 &vdev->bars[vdev->msix->pba_bar].mem);
2330 static void vfio_mmap_set_enabled(VFIODevice *vdev, bool enabled)
2334 for (i = 0; i < PCI_ROM_SLOT; i++) {
2335 VFIOBAR *bar = &vdev->bars[i];
2341 memory_region_set_enabled(&bar->mmap_mem, enabled);
2342 if (vdev->msix && vdev->msix->table_bar == i) {
2343 memory_region_set_enabled(&vdev->msix->mmap_mem, enabled);
2348 static void vfio_unmap_bar(VFIODevice *vdev, int nr)
2350 VFIOBAR *bar = &vdev->bars[nr];
2356 vfio_bar_quirk_teardown(vdev, nr);
2358 memory_region_del_subregion(&bar->mem, &bar->mmap_mem);
2359 munmap(bar->mmap, memory_region_size(&bar->mmap_mem));
2361 if (vdev->msix && vdev->msix->table_bar == nr) {
2362 memory_region_del_subregion(&bar->mem, &vdev->msix->mmap_mem);
2363 munmap(vdev->msix->mmap, memory_region_size(&vdev->msix->mmap_mem));
2366 memory_region_destroy(&bar->mem);
2369 static int vfio_mmap_bar(VFIODevice *vdev, VFIOBAR *bar,
2370 MemoryRegion *mem, MemoryRegion *submem,
2371 void **map, size_t size, off_t offset,
2376 if (VFIO_ALLOW_MMAP && size && bar->flags & VFIO_REGION_INFO_FLAG_MMAP) {
2379 if (bar->flags & VFIO_REGION_INFO_FLAG_READ) {
2383 if (bar->flags & VFIO_REGION_INFO_FLAG_WRITE) {
2387 *map = mmap(NULL, size, prot, MAP_SHARED,
2388 bar->fd, bar->fd_offset + offset);
2389 if (*map == MAP_FAILED) {
2395 memory_region_init_ram_ptr(submem, OBJECT(vdev), name, size, *map);
2398 /* Create a zero-sized sub-region to make cleanup easy. */
2399 memory_region_init(submem, OBJECT(vdev), name, 0);
2402 memory_region_add_subregion(mem, offset, submem);
2407 static void vfio_map_bar(VFIODevice *vdev, int nr)
2409 VFIOBAR *bar = &vdev->bars[nr];
2410 unsigned size = bar->size;
2416 /* Skip both unimplemented BARs and the upper half of 64bit BARS. */
2421 snprintf(name, sizeof(name), "VFIO %04x:%02x:%02x.%x BAR %d",
2422 vdev->host.domain, vdev->host.bus, vdev->host.slot,
2423 vdev->host.function, nr);
2425 /* Determine what type of BAR this is for registration */
2426 ret = pread(vdev->fd, &pci_bar, sizeof(pci_bar),
2427 vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
2428 if (ret != sizeof(pci_bar)) {
2429 error_report("vfio: Failed to read BAR %d (%m)", nr);
2433 pci_bar = le32_to_cpu(pci_bar);
2434 bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO);
2435 bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64);
2436 type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK :
2437 ~PCI_BASE_ADDRESS_MEM_MASK);
2439 /* A "slow" read/write mapping underlies all BARs */
2440 memory_region_init_io(&bar->mem, OBJECT(vdev), &vfio_bar_ops,
2442 pci_register_bar(&vdev->pdev, nr, type, &bar->mem);
2445 * We can't mmap areas overlapping the MSIX vector table, so we
2446 * potentially insert a direct-mapped subregion before and after it.
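/*
 * Resulting layout of a BAR containing the MSI-X table (a sketch):
 *
 *   |---- mmap ----|-- MSI-X table (trapped) --|---- msix-hi mmap ----|
 *   0          table_offset            aligned table end      bar->size
 */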
2448 if (vdev->msix && vdev->msix->table_bar == nr) {
2449 size = vdev->msix->table_offset & TARGET_PAGE_MASK;
2452 strncat(name, " mmap", sizeof(name) - strlen(name) - 1);
2453 if (vfio_mmap_bar(vdev, bar, &bar->mem,
2454 &bar->mmap_mem, &bar->mmap, size, 0, name)) {
2455 error_report("%s unsupported. Performance may be slow", name);
2458 if (vdev->msix && vdev->msix->table_bar == nr) {
2461 start = TARGET_PAGE_ALIGN(vdev->msix->table_offset +
2462 (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));
2464 size = start < bar->size ? bar->size - start : 0;
2465 strncat(name, " msix-hi", sizeof(name) - strlen(name) - 1);
2466 /* VFIOMSIXInfo contains another MemoryRegion for this mapping */
2467 if (vfio_mmap_bar(vdev, bar, &bar->mem, &vdev->msix->mmap_mem,
2468 &vdev->msix->mmap, size, start, name)) {
2469 error_report("%s unsupported. Performance may be slow", name);
2473 vfio_bar_quirk_setup(vdev, nr);
2476 static void vfio_map_bars(VFIODevice *vdev)
2480 for (i = 0; i < PCI_ROM_SLOT; i++) {
2481 vfio_map_bar(vdev, i);
2484 if (vdev->has_vga) {
2485 memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_MEM].mem,
2486 OBJECT(vdev), &vfio_vga_ops,
2487 &vdev->vga.region[QEMU_PCI_VGA_MEM],
2488 "vfio-vga-mmio@0xa0000",
2489 QEMU_PCI_VGA_MEM_SIZE);
2490 memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem,
2491 OBJECT(vdev), &vfio_vga_ops,
2492 &vdev->vga.region[QEMU_PCI_VGA_IO_LO],
2493 "vfio-vga-io@0x3b0",
2494 QEMU_PCI_VGA_IO_LO_SIZE);
2495 memory_region_init_io(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem,
2496 OBJECT(vdev), &vfio_vga_ops,
2497 &vdev->vga.region[QEMU_PCI_VGA_IO_HI],
2498 "vfio-vga-io@0x3c0",
2499 QEMU_PCI_VGA_IO_HI_SIZE);
2501 pci_register_vga(&vdev->pdev, &vdev->vga.region[QEMU_PCI_VGA_MEM].mem,
2502 &vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem,
2503 &vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem);
2504 vfio_vga_quirk_setup(vdev);
2508 static void vfio_unmap_bars(VFIODevice *vdev)
2512 for (i = 0; i < PCI_ROM_SLOT; i++) {
2513 vfio_unmap_bar(vdev, i);
2516 if (vdev->has_vga) {
2517 vfio_vga_quirk_teardown(vdev);
2518 pci_unregister_vga(&vdev->pdev);
2519 memory_region_destroy(&vdev->vga.region[QEMU_PCI_VGA_MEM].mem);
2520 memory_region_destroy(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].mem);
2521 memory_region_destroy(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].mem);
2528 static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
2530 uint8_t tmp, next = 0xff;
2532 for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
2533 tmp = pdev->config[tmp + 1]) {
2534 if (tmp > pos && tmp < next) {
2542 static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
2544 pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
2547 static void vfio_add_emulated_word(VFIODevice *vdev, int pos,
2548 uint16_t val, uint16_t mask)
2550 vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
2551 vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
2552 vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
2555 static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
2557 pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
2560 static void vfio_add_emulated_long(VFIODevice *vdev, int pos,
2561 uint32_t val, uint32_t mask)
2563 vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
2564 vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
2565 vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
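/*
 * Each helper above updates three views of config space: the emulated
 * value itself, the guest-writable mask (the field becomes read-only
 * to the guest), and the emulated_config_bits bitmap that steers
 * vfio_pci_read_config() to the emulated copy instead of the device.
 */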
2568 static int vfio_setup_pcie_cap(VFIODevice *vdev, int pos, uint8_t size)
2573 flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
2574 type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;
2576 if (type != PCI_EXP_TYPE_ENDPOINT &&
2577 type != PCI_EXP_TYPE_LEG_END &&
2578 type != PCI_EXP_TYPE_RC_END) {
2580 error_report("vfio: Assignment of PCIe type 0x%x "
2581 "devices is not currently supported", type);
2585 if (!pci_bus_is_express(vdev->pdev.bus)) {
2587 * Use express capability as-is on PCI bus. It doesn't make much
2588 * sense to even expose it, but some drivers (e.g. tg3) depend on it
2589 * and guests don't seem to be particular about it. We'll need
2590 * to revisit this or force express devices to express buses if we
2591 * ever expose an IOMMU to the guest.
2593 } else if (pci_bus_is_root(vdev->pdev.bus)) {
2595 * On a Root Complex bus Endpoints become Root Complex Integrated
2596 * Endpoints, which changes the type and clears the LNK & LNK2 fields.
2598 if (type == PCI_EXP_TYPE_ENDPOINT) {
2599 vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
2600 PCI_EXP_TYPE_RC_END << 4,
2601 PCI_EXP_FLAGS_TYPE);
2603 /* Link Capabilities, Status, and Control go away */
2604 if (size > PCI_EXP_LNKCTL) {
2605 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
2606 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
2607 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);
2609 #ifndef PCI_EXP_LNKCAP2
2610 #define PCI_EXP_LNKCAP2 44
2612 #ifndef PCI_EXP_LNKSTA2
2613 #define PCI_EXP_LNKSTA2 50
2615 /* Link 2 Capabilities, Status, and Control go away */
2616 if (size > PCI_EXP_LNKCAP2) {
2617 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
2618 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
2619 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
2623 } else if (type == PCI_EXP_TYPE_LEG_END) {
2625 * Legacy endpoints don't belong on the root complex. Windows
2626 * seems to be happier with devices if we skip the capability.
2633 * Convert Root Complex Integrated Endpoints to regular endpoints.
2634 * These devices don't support LNK/LNK2 capabilities, so make them up.
2636 if (type == PCI_EXP_TYPE_RC_END) {
2637 vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
2638 PCI_EXP_TYPE_ENDPOINT << 4,
2639 PCI_EXP_FLAGS_TYPE);
2640 vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
2641 PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25, ~0);
2642 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
2645 /* Mark the Link Status bits as emulated to allow virtual negotiation */
2646 vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA,
2647 pci_get_word(vdev->pdev.config + pos +
2649 PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
2652 pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size);
2654 vdev->pdev.exp.exp_cap = pos;
2660 static void vfio_check_pcie_flr(VFIODevice *vdev, uint8_t pos)
2662 uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP);
2664 if (cap & PCI_EXP_DEVCAP_FLR) {
2665 DPRINTF("%04x:%02x:%02x.%x Supports FLR via PCIe cap\n",
2666 vdev->host.domain, vdev->host.bus, vdev->host.slot,
2667 vdev->host.function);
2668 vdev->has_flr = true;
2672 static void vfio_check_pm_reset(VFIODevice *vdev, uint8_t pos)
2674 uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL);
2676 if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) {
2677 DPRINTF("%04x:%02x:%02x.%x Supports PM reset\n",
2678 vdev->host.domain, vdev->host.bus, vdev->host.slot,
2679 vdev->host.function);
2680 vdev->has_pm_reset = true;
2684 static void vfio_check_af_flr(VFIODevice *vdev, uint8_t pos)
2686 uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP);
2688 if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) {
2689 DPRINTF("%04x:%02x:%02x.%x Supports FLR via AF cap\n",
2690 vdev->host.domain, vdev->host.bus, vdev->host.slot,
2691 vdev->host.function);
2692 vdev->has_flr = true;
2696 static int vfio_add_std_cap(VFIODevice *vdev, uint8_t pos)
2698 PCIDevice *pdev = &vdev->pdev;
2699 uint8_t cap_id, next, size;
2702 cap_id = pdev->config[pos];
2703 next = pdev->config[pos + 1];
2706 * If it becomes important to configure capabilities to their actual
2707 * size, use this as the default when it's something we don't recognize.
2708 * Since QEMU doesn't actually handle many of the config accesses,
2709 * exact size doesn't seem worthwhile.
2711 size = vfio_std_cap_max_size(pdev, pos);
2714 * pci_add_capability always inserts the new capability at the head
2715 * of the chain. Therefore to end up with a chain that matches the
2716 * physical device, we insert from the end by making this recursive.
2717 * This is also why we pre-calculate size above as cached config space
2718 * will be changed as we unwind the stack.
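/*
 * For example (a sketch): given a physical chain 0x40 -> 0x50 -> 0x60,
 * we recurse down to 0x60 and add it first; unwinding then adds 0x50
 * and finally 0x40, each becoming the new head, so the guest sees the
 * same 0x40 -> 0x50 -> 0x60 ordering as the host.
 */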
2721 ret = vfio_add_std_cap(vdev, next);
2726 /* Begin the rebuild, use QEMU emulated list bits */
2727 pdev->config[PCI_CAPABILITY_LIST] = 0;
2728 vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
2729 vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
2732 /* Use emulated next pointer to allow dropping caps */
2733 pci_set_byte(vdev->emulated_config_bits + pos + 1, 0xff);
2736 case PCI_CAP_ID_MSI:
2737 ret = vfio_setup_msi(vdev, pos);
2739 case PCI_CAP_ID_EXP:
2740 vfio_check_pcie_flr(vdev, pos);
2741 ret = vfio_setup_pcie_cap(vdev, pos, size);
2743 case PCI_CAP_ID_MSIX:
2744 ret = vfio_setup_msix(vdev, pos);
2747 vfio_check_pm_reset(vdev, pos);
2749 ret = pci_add_capability(pdev, cap_id, pos, size);
2752 vfio_check_af_flr(vdev, pos);
2753 ret = pci_add_capability(pdev, cap_id, pos, size);
2756 ret = pci_add_capability(pdev, cap_id, pos, size);
2761 error_report("vfio: %04x:%02x:%02x.%x Error adding PCI capability "
2762 "0x%x[0x%x]@0x%x: %d", vdev->host.domain,
2763 vdev->host.bus, vdev->host.slot, vdev->host.function,
2764 cap_id, size, pos, ret);
2771 static int vfio_add_capabilities(VFIODevice *vdev)
2773 PCIDevice *pdev = &vdev->pdev;
2775 if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
2776 !pdev->config[PCI_CAPABILITY_LIST]) {
2777 return 0; /* Nothing to add */
2780 return vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST]);
2783 static void vfio_pci_pre_reset(VFIODevice *vdev)
2785 PCIDevice *pdev = &vdev->pdev;
2788 vfio_disable_interrupts(vdev);
2790 /* Make sure the device is in D0 */
2795 pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
2796 state = pmcsr & PCI_PM_CTRL_STATE_MASK;
2798 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2799 vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
2800 /* vfio handles the necessary delay here */
2801 pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
2802 state = pmcsr & PCI_PM_CTRL_STATE_MASK;
2804 error_report("vfio: Unable to power on device, stuck in D%d",
2811 * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
2812 * Also put INTx Disable in known state.
2814 cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
2815 cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
2816 PCI_COMMAND_INTX_DISABLE);
2817 vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
2820 static void vfio_pci_post_reset(VFIODevice *vdev)
2822 vfio_enable_intx(vdev);
2825 static bool vfio_pci_host_match(PCIHostDeviceAddress *host1,
2826 PCIHostDeviceAddress *host2)
2828 return (host1->domain == host2->domain && host1->bus == host2->bus &&
2829 host1->slot == host2->slot && host1->function == host2->function);
2832 static int vfio_pci_hot_reset(VFIODevice *vdev, bool single)
2835 struct vfio_pci_hot_reset_info *info;
2836 struct vfio_pci_dependent_device *devices;
2837 struct vfio_pci_hot_reset *reset;
2842 DPRINTF("%s(%04x:%02x:%02x.%x) %s\n", __func__, vdev->host.domain,
2843 vdev->host.bus, vdev->host.slot, vdev->host.function,
2844 single ? "one" : "multi");
2846 vfio_pci_pre_reset(vdev);
2847 vdev->needs_reset = false;
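/*
 * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO follows the usual vfio
 * variable-argsz idiom: call once with the minimal size, expect
 * -ENOSPC with info->count filled in, then reallocate with room for
 * count vfio_pci_dependent_device entries and call again.
 */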
2849 info = g_malloc0(sizeof(*info));
2850 info->argsz = sizeof(*info);
2852 ret = ioctl(vdev->fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
2853 if (ret && errno != ENOSPC) {
2855 if (!vdev->has_pm_reset) {
2856 error_report("vfio: Cannot reset device %04x:%02x:%02x.%x, "
2857 "no available reset mechanism.", vdev->host.domain,
2858 vdev->host.bus, vdev->host.slot, vdev->host.function);
2863 count = info->count;
2864 info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices)));
2865 info->argsz = sizeof(*info) + (count * sizeof(*devices));
2866 devices = &info->devices[0];
2868 ret = ioctl(vdev->fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
2871 error_report("vfio: hot reset info failed: %m");
2875 DPRINTF("%04x:%02x:%02x.%x: hot reset dependent devices:\n",
2876 vdev->host.domain, vdev->host.bus, vdev->host.slot,
2877 vdev->host.function);
2879 /* Verify that we have all the groups required */
2880 for (i = 0; i < info->count; i++) {
2881 PCIHostDeviceAddress host;
2884 host.domain = devices[i].segment;
2885 host.bus = devices[i].bus;
2886 host.slot = PCI_SLOT(devices[i].devfn);
2887 host.function = PCI_FUNC(devices[i].devfn);
2889 DPRINTF("\t%04x:%02x:%02x.%x group %d\n", host.domain,
2890 host.bus, host.slot, host.function, devices[i].group_id);
2892 if (vfio_pci_host_match(&host, &vdev->host)) {
2896 QLIST_FOREACH(group, &group_list, next) {
2897 if (group->groupid == devices[i].group_id) {
2903 if (!vdev->has_pm_reset) {
2904 error_report("vfio: Cannot reset device %04x:%02x:%02x.%x, "
2905 "depends on group %d which is not owned.",
2906 vdev->host.domain, vdev->host.bus, vdev->host.slot,
2907 vdev->host.function, devices[i].group_id);
2913 /* Prep dependent devices for reset and clear our marker. */
2914 QLIST_FOREACH(tmp, &group->device_list, next) {
2915 if (vfio_pci_host_match(&host, &tmp->host)) {
2917 DPRINTF("vfio: found another in-use device "
2918 "%04x:%02x:%02x.%x\n", host.domain, host.bus,
2919 host.slot, host.function);
2923 vfio_pci_pre_reset(tmp);
2924 tmp->needs_reset = false;
2931 if (!single && !multi) {
2932 DPRINTF("vfio: No other in-use devices for multi hot reset\n");
2937 /* Determine how many group fds need to be passed */
2939 QLIST_FOREACH(group, &group_list, next) {
2940 for (i = 0; i < info->count; i++) {
2941 if (group->groupid == devices[i].group_id) {
2948 reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
2949 reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
2950 fds = &reset->group_fds[0];
2952 /* Fill in group fds */
2953 QLIST_FOREACH(group, &group_list, next) {
2954 for (i = 0; i < info->count; i++) {
2955 if (group->groupid == devices[i].group_id) {
2956 fds[reset->count++] = group->fd;
2963 ret = ioctl(vdev->fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
2966 DPRINTF("%04x:%02x:%02x.%x hot reset: %s\n", vdev->host.domain,
2967 vdev->host.bus, vdev->host.slot, vdev->host.function,
2968 ret ? "%m" : "Success");
2971 /* Re-enable INTx on affected devices */
2972 for (i = 0; i < info->count; i++) {
2973 PCIHostDeviceAddress host;
2976 host.domain = devices[i].segment;
2977 host.bus = devices[i].bus;
2978 host.slot = PCI_SLOT(devices[i].devfn);
2979 host.function = PCI_FUNC(devices[i].devfn);
2981 if (vfio_pci_host_match(&host, &vdev->host)) {
2985 QLIST_FOREACH(group, &group_list, next) {
2986 if (group->groupid == devices[i].group_id) {
2995 QLIST_FOREACH(tmp, &group->device_list, next) {
2996 if (vfio_pci_host_match(&host, &tmp->host)) {
2997 vfio_pci_post_reset(tmp);
3003 vfio_pci_post_reset(vdev);
3010 * We want to differentiate hot reset of multiple in-use devices vs hot reset
3011 * of a single in-use device. VFIO_DEVICE_RESET will already handle the case
3012 * of doing hot resets when there is only a single device per bus. The in-use
3013 * here refers to how many VFIODevices are affected. A hot reset that affects
3014 * multiple devices, but only a single in-use device, means that we can call
3015 * it from our bus ->reset() callback since the extent is effectively a single
3016 * device. This allows us to make use of it in the hotplug path. When there
3017 * are multiple in-use devices, we can only trigger the hot reset during a
3018 * system reset and thus from our reset handler. We separate _one vs _multi
3019 * here so that we don't overlap and do a double reset on the system reset
3020 * path where both our reset handler and ->reset() callback are used. Calling
3021 * _one() will only do a hot reset for the single in-use device case, calling
3022 * _multi() will do nothing if a _one() would have been sufficient.
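/*
 * Concretely: vfio_pci_reset() (the bus ->reset() callback) tries
 * _one() below, which fails if any other in-use device would be
 * affected, and the system reset handler sweeps up whatever is still
 * marked needs_reset via _multi().
 */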
3024 static int vfio_pci_hot_reset_one(VFIODevice *vdev)
3026 return vfio_pci_hot_reset(vdev, true);
3029 static int vfio_pci_hot_reset_multi(VFIODevice *vdev)
3031 return vfio_pci_hot_reset(vdev, false);
3034 static void vfio_pci_reset_handler(void *opaque)
3039 QLIST_FOREACH(group, &group_list, next) {
3040 QLIST_FOREACH(vdev, &group->device_list, next) {
3041 if (!vdev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) {
3042 vdev->needs_reset = true;
3047 QLIST_FOREACH(group, &group_list, next) {
3048 QLIST_FOREACH(vdev, &group->device_list, next) {
3049 if (vdev->needs_reset) {
3050 vfio_pci_hot_reset_multi(vdev);
3056 static int vfio_connect_container(VFIOGroup *group)
3058 VFIOContainer *container;
3061 if (group->container) {
3065 QLIST_FOREACH(container, &container_list, next) {
3066 if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
3067 group->container = container;
3068 QLIST_INSERT_HEAD(&container->group_list, group, container_next);
3073 fd = qemu_open("/dev/vfio/vfio", O_RDWR);
3075 error_report("vfio: failed to open /dev/vfio/vfio: %m");
3079 ret = ioctl(fd, VFIO_GET_API_VERSION);
3080 if (ret != VFIO_API_VERSION) {
3081 error_report("vfio: supported vfio version: %d, "
3082 "reported version: %d", VFIO_API_VERSION, ret);
3087 container = g_malloc0(sizeof(*container));
3090 if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
3091 ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
3093 error_report("vfio: failed to set group container: %m");
3099 ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
3101 error_report("vfio: failed to set iommu for container: %m");
3107 container->iommu_data.listener = vfio_memory_listener;
3108 container->iommu_data.release = vfio_listener_release;
3110 memory_listener_register(&container->iommu_data.listener, &address_space_memory);
3112 error_report("vfio: No available IOMMU models");
3118 QLIST_INIT(&container->group_list);
3119 QLIST_INSERT_HEAD(&container_list, container, next);
3121 group->container = container;
3122 QLIST_INSERT_HEAD(&container->group_list, group, container_next);
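/*
 * For reference, the minimal type1 bring-up sequence implemented
 * above (a sketch, error handling omitted):
 *
 *   fd = open("/dev/vfio/vfio", O_RDWR);
 *   ioctl(fd, VFIO_GET_API_VERSION);         // == VFIO_API_VERSION
 *   ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU);
 *   ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
 *   ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
 */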
3127 static void vfio_disconnect_container(VFIOGroup *group)
3129 VFIOContainer *container = group->container;
3131 if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
3132 error_report("vfio: error disconnecting group %d from container",
3136 QLIST_REMOVE(group, container_next);
3137 group->container = NULL;
3139 if (QLIST_EMPTY(&container->group_list)) {
3140 if (container->iommu_data.release) {
3141 container->iommu_data.release(container);
3143 QLIST_REMOVE(container, next);
3144 DPRINTF("vfio_disconnect_container: close container->fd\n");
3145 close(container->fd);
3150 static VFIOGroup *vfio_get_group(int groupid)
3154 struct vfio_group_status status = { .argsz = sizeof(status) };
3156 QLIST_FOREACH(group, &group_list, next) {
3157 if (group->groupid == groupid) {
3162 group = g_malloc0(sizeof(*group));
3164 snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
3165 group->fd = qemu_open(path, O_RDWR);
3166 if (group->fd < 0) {
3167 error_report("vfio: error opening %s: %m", path);
3172 if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
3173 error_report("vfio: error getting group status: %m");
3179 if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
3180 error_report("vfio: error, group %d is not viable, please ensure "
3181 "all devices within the iommu_group are bound to their "
3182 "vfio bus driver.", groupid);
3188 group->groupid = groupid;
3189 QLIST_INIT(&group->device_list);
3191 if (vfio_connect_container(group)) {
3192 error_report("vfio: failed to setup container for group %d", groupid);
3198 if (QLIST_EMPTY(&group_list)) {
3199 qemu_register_reset(vfio_pci_reset_handler, NULL);
3202 QLIST_INSERT_HEAD(&group_list, group, next);
3207 static void vfio_put_group(VFIOGroup *group)
3209 if (!QLIST_EMPTY(&group->device_list)) {
3213 vfio_disconnect_container(group);
3214 QLIST_REMOVE(group, next);
3215 DPRINTF("vfio_put_group: close group->fd\n");
3219 if (QLIST_EMPTY(&group_list)) {
3220 qemu_unregister_reset(vfio_pci_reset_handler, NULL);
3224 static int vfio_get_device(VFIOGroup *group, const char *name, VFIODevice *vdev)
3226 struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
3227 struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
3228 struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
3231 ret = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
3233 error_report("vfio: error getting device %s from group %d: %m",
3234 name, group->groupid);
3235 error_printf("Verify all devices in group %d are bound to vfio-pci "
3236 "or pci-stub and not already in use\n", group->groupid);
3241 vdev->group = group;
3242 QLIST_INSERT_HEAD(&group->device_list, vdev, next);
3244 /* Sanity check device */
3245 ret = ioctl(vdev->fd, VFIO_DEVICE_GET_INFO, &dev_info);
3247 error_report("vfio: error getting device info: %m");
3251 DPRINTF("Device %s flags: %u, regions: %u, irqs: %u\n", name,
3252 dev_info.flags, dev_info.num_regions, dev_info.num_irqs);
3254 if (!(dev_info.flags & VFIO_DEVICE_FLAGS_PCI)) {
3255 error_report("vfio: Um, this isn't a PCI device");
3259 vdev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
3261 if (dev_info.num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
3262 error_report("vfio: unexpected number of io regions %u",
3263 dev_info.num_regions);
3267 if (dev_info.num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
3268 error_report("vfio: unexpected number of irqs %u", dev_info.num_irqs);
3272 for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
3275 ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
3277 error_report("vfio: Error getting region %d info: %m", i);
3281 DPRINTF("Device %s region %d:\n", name, i);
3282 DPRINTF(" size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
3283 (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
3284 (unsigned long)reg_info.flags);
3286 vdev->bars[i].flags = reg_info.flags;
3287 vdev->bars[i].size = reg_info.size;
3288 vdev->bars[i].fd_offset = reg_info.offset;
3289 vdev->bars[i].fd = vdev->fd;
3290 vdev->bars[i].nr = i;
3291 QLIST_INIT(&vdev->bars[i].quirks);
3294 reg_info.index = VFIO_PCI_CONFIG_REGION_INDEX;
3296 ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
3298 error_report("vfio: Error getting config info: %m");
3302 DPRINTF("Device %s config:\n", name);
3303 DPRINTF(" size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n",
3304 (unsigned long)reg_info.size, (unsigned long)reg_info.offset,
3305 (unsigned long)reg_info.flags);
3307 vdev->config_size = reg_info.size;
3308 if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
3309 vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
3311 vdev->config_offset = reg_info.offset;
3313 if ((vdev->features & VFIO_FEATURE_ENABLE_VGA) &&
3314 dev_info.num_regions > VFIO_PCI_VGA_REGION_INDEX) {
3315 struct vfio_region_info vga_info = {
3316 .argsz = sizeof(vga_info),
3317 .index = VFIO_PCI_VGA_REGION_INDEX,
3320 ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &vga_info);
3323 "vfio: Device does not support requested feature x-vga");
3327 if (!(vga_info.flags & VFIO_REGION_INFO_FLAG_READ) ||
3328 !(vga_info.flags & VFIO_REGION_INFO_FLAG_WRITE) ||
3329 vga_info.size < 0xbffff + 1) {
3330 error_report("vfio: Unexpected VGA info, flags 0x%lx, size 0x%lx",
3331 (unsigned long)vga_info.flags,
3332 (unsigned long)vga_info.size);
3336 vdev->vga.fd_offset = vga_info.offset;
3337 vdev->vga.fd = vdev->fd;
3339 vdev->vga.region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
3340 vdev->vga.region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
3341 QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_MEM].quirks);
3343 vdev->vga.region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
3344 vdev->vga.region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
3345 QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].quirks);
3347 vdev->vga.region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
3348 vdev->vga.region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
3349 QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks);
3351 vdev->has_vga = true;
3353 irq_info.index = VFIO_PCI_ERR_IRQ_INDEX;
3355 ret = ioctl(vdev->fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
3357 /* This can fail for an old kernel or legacy PCI dev */
3358 DPRINTF("VFIO_DEVICE_GET_IRQ_INFO failure: %m\n");
3360 } else if (irq_info.count == 1) {
3361 vdev->pci_aer = true;
3363 error_report("vfio: %04x:%02x:%02x.%x "
3364 "Could not enable error recovery for the device",
3365 vdev->host.domain, vdev->host.bus, vdev->host.slot,
3366 vdev->host.function);
3371 QLIST_REMOVE(vdev, next);
3378 static void vfio_put_device(VFIODevice *vdev)
3380 QLIST_REMOVE(vdev, next);
3382 DPRINTF("vfio_put_device: close vdev->fd\n");
3390 static void vfio_err_notifier_handler(void *opaque)
3392 VFIODevice *vdev = opaque;
3394 if (!event_notifier_test_and_clear(&vdev->err_notifier)) {
3399 * TBD. Retrieve the error details and decide what action
3400 * needs to be taken. One of the actions could be to pass
3401 * the error to the guest and have the guest driver recover
3402 * from the error. This requires that PCIe capabilities be
3403 * exposed to the guest. For now, we just terminate the
3404 * guest to contain the error.
3407 error_report("%s(%04x:%02x:%02x.%x) Unrecoverable error detected. "
3408 "Please collect any data possible and then kill the guest",
3409 __func__, vdev->host.domain, vdev->host.bus,
3410 vdev->host.slot, vdev->host.function);
3412 vm_stop(RUN_STATE_IO_ERROR);
3416 * Registers error notifier for devices supporting error recovery.
3417 * If we encounter a failure in this function, we report an error
3418 * and continue after disabling error recovery support for the
3421 static void vfio_register_err_notifier(VFIODevice *vdev)
3425 struct vfio_irq_set *irq_set;
3428 if (!vdev->pci_aer) {
3432 if (event_notifier_init(&vdev->err_notifier, 0)) {
3433 error_report("vfio: Unable to init event notifier for error detection");
3434 vdev->pci_aer = false;
3438 argsz = sizeof(*irq_set) + sizeof(*pfd);
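/*
 * struct vfio_irq_set is variable-sized: argsz covers the header plus
 * a single int32_t eventfd in data[], and DATA_EVENTFD with
 * ACTION_TRIGGER ties that eventfd to the error IRQ index.
 */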
3440 irq_set = g_malloc0(argsz);
3441 irq_set->argsz = argsz;
3442 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
3443 VFIO_IRQ_SET_ACTION_TRIGGER;
3444 irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
3447 pfd = (int32_t *)&irq_set->data;
3449 *pfd = event_notifier_get_fd(&vdev->err_notifier);
3450 qemu_set_fd_handler(*pfd, vfio_err_notifier_handler, NULL, vdev);
3452 ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
3454 error_report("vfio: Failed to set up error notification");
3455 qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
3456 event_notifier_cleanup(&vdev->err_notifier);
3457 vdev->pci_aer = false;
3462 static void vfio_unregister_err_notifier(VFIODevice *vdev)
3465 struct vfio_irq_set *irq_set;
3469 if (!vdev->pci_aer) {
3473 argsz = sizeof(*irq_set) + sizeof(*pfd);
3475 irq_set = g_malloc0(argsz);
3476 irq_set->argsz = argsz;
3477 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
3478 VFIO_IRQ_SET_ACTION_TRIGGER;
3479 irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
3482 pfd = (int32_t *)&irq_set->data;
3485 ret = ioctl(vdev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
3487 error_report("vfio: Failed to de-assign error fd: %m");
3490 qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier),
3492 event_notifier_cleanup(&vdev->err_notifier);
3495 static int vfio_initfn(PCIDevice *pdev)
3497 VFIODevice *pvdev, *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
3499 char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name;
3505 /* Check that the host device exists */
3506 snprintf(path, sizeof(path),
3507 "/sys/bus/pci/devices/%04x:%02x:%02x.%01x/",
3508 vdev->host.domain, vdev->host.bus, vdev->host.slot,
3509 vdev->host.function);
3510 if (stat(path, &st) < 0) {
3511 error_report("vfio: error: no such host device: %s", path);
3515 strncat(path, "iommu_group", sizeof(path) - strlen(path) - 1);
3517 len = readlink(path, iommu_group_path, PATH_MAX);
3519 error_report("vfio: error no iommu_group for device");
3523 iommu_group_path[len] = 0;
3524 group_name = basename(iommu_group_path);
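/*
 * For instance, /sys/bus/pci/devices/0000:06:0d.0/iommu_group might
 * link to ../../../kernel/iommu_groups/26, making group_name "26".
 */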
3526 if (sscanf(group_name, "%d", &groupid) != 1) {
3527 error_report("vfio: error reading %s: %m", path);
3531 DPRINTF("%s(%04x:%02x:%02x.%x) group %d\n", __func__, vdev->host.domain,
3532 vdev->host.bus, vdev->host.slot, vdev->host.function, groupid);
3534 group = vfio_get_group(groupid);
3536 error_report("vfio: failed to get group %d", groupid);
3540 snprintf(path, sizeof(path), "%04x:%02x:%02x.%01x",
3541 vdev->host.domain, vdev->host.bus, vdev->host.slot,
3542 vdev->host.function);
3544 QLIST_FOREACH(pvdev, &group->device_list, next) {
3545 if (pvdev->host.domain == vdev->host.domain &&
3546 pvdev->host.bus == vdev->host.bus &&
3547 pvdev->host.slot == vdev->host.slot &&
3548 pvdev->host.function == vdev->host.function) {
3550 error_report("vfio: error: device %s is already attached", path);
3551 vfio_put_group(group);
3556 ret = vfio_get_device(group, path, vdev);
3558 error_report("vfio: failed to get device %s", path);
3559 vfio_put_group(group);
3563 /* Get a copy of config space */
3564 ret = pread(vdev->fd, vdev->pdev.config,
3565 MIN(pci_config_size(&vdev->pdev), vdev->config_size),
3566 vdev->config_offset);
3567 if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
3568 ret = ret < 0 ? -errno : -EFAULT;
3569 error_report("vfio: Failed to read device config space");
3573 /* vfio emulates a lot for us, but some bits need extra love */
3574 vdev->emulated_config_bits = g_malloc0(vdev->config_size);
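/*
 * (A set bit in emulated_config_bits means the corresponding config
 * space bit is served from QEMU's copy in pdev->config; clear bits
 * are read from the physical device.)
 */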
3576 /* QEMU can choose to expose the ROM or not */
3577 memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);
3579 /* QEMU can change multi-function devices to single function, or reverse */
3580 vdev->emulated_config_bits[PCI_HEADER_TYPE] =
3581 PCI_HEADER_TYPE_MULTI_FUNCTION;
3583 /* Restore or clear multifunction; this is always controlled by QEMU */
3584 if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
3585 vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
3587 vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
3591 * Clear host resource mapping info. If we choose not to register a
3592 * BAR, such as might be the case with the option ROM, we can get
3593 * confusing, unwritable, residual addresses from the host here.
3595 memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
3596 memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);
3598 vfio_pci_size_rom(vdev);
3600 ret = vfio_early_setup_msix(vdev);
3605 vfio_map_bars(vdev);
3607 ret = vfio_add_capabilities(vdev);
3612 /* QEMU emulates all of MSI & MSIX */
3613 if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
3614 memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
3618 if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
3619 memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
3620 vdev->msi_cap_size);
3623 if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
3624 vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
3625 vfio_intx_mmap_enable, vdev);
3626 pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_update_irq);
3627 ret = vfio_enable_intx(vdev);
3633 add_boot_device_path(vdev->bootindex, &pdev->qdev, NULL);
3634 vfio_register_err_notifier(vdev);
3639 pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
3640 vfio_teardown_msi(vdev);
3641 vfio_unmap_bars(vdev);
3643 g_free(vdev->emulated_config_bits);
3644 vfio_put_device(vdev);
3645 vfio_put_group(group);
3649 static void vfio_exitfn(PCIDevice *pdev)
3651 VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
3652 VFIOGroup *group = vdev->group;
3654 vfio_unregister_err_notifier(vdev);
3655 pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
3656 vfio_disable_interrupts(vdev);
3657 if (vdev->intx.mmap_timer) {
3658 timer_free(vdev->intx.mmap_timer);
3660 vfio_teardown_msi(vdev);
3661 vfio_unmap_bars(vdev);
3662 g_free(vdev->emulated_config_bits);
3664 vfio_put_device(vdev);
3665 vfio_put_group(group);
3668 static void vfio_pci_reset(DeviceState *dev)
3670 PCIDevice *pdev = DO_UPCAST(PCIDevice, qdev, dev);
3671 VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
3673 DPRINTF("%s(%04x:%02x:%02x.%x)\n", __func__, vdev->host.domain,
3674 vdev->host.bus, vdev->host.slot, vdev->host.function);
3676 vfio_pci_pre_reset(vdev);
3678 if (vdev->reset_works && (vdev->has_flr || !vdev->has_pm_reset) &&
3679 !ioctl(vdev->fd, VFIO_DEVICE_RESET)) {
3680 DPRINTF("%04x:%02x:%02x.%x FLR/VFIO_DEVICE_RESET\n", vdev->host.domain,
3681 vdev->host.bus, vdev->host.slot, vdev->host.function);
3685 /* See if we can do our own bus reset */
3686 if (!vfio_pci_hot_reset_one(vdev)) {
3690 /* If nothing else works and the device supports PM reset, use it */
3691 if (vdev->reset_works && vdev->has_pm_reset &&
3692 !ioctl(vdev->fd, VFIO_DEVICE_RESET)) {
3693 DPRINTF("%04x:%02x:%02x.%x PCI PM Reset\n", vdev->host.domain,
3694 vdev->host.bus, vdev->host.slot, vdev->host.function);
3699 vfio_pci_post_reset(vdev);
3702 static Property vfio_pci_dev_properties[] = {
3703 DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIODevice, host),
3704 DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIODevice,
3705 intx.mmap_timeout, 1100),
3706 DEFINE_PROP_BIT("x-vga", VFIODevice, features,
3707 VFIO_FEATURE_ENABLE_VGA_BIT, false),
3708 DEFINE_PROP_INT32("bootindex", VFIODevice, bootindex, -1),
3710 * TODO - support passed fds... is this necessary?
3711 * DEFINE_PROP_STRING("vfiofd", VFIODevice, vfiofd_name),
3712 * DEFINE_PROP_STRING("vfiogroupfd, VFIODevice, vfiogroupfd_name),
3714 DEFINE_PROP_END_OF_LIST(),
3717 static const VMStateDescription vfio_pci_vmstate = {
3722 static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
3724 DeviceClass *dc = DEVICE_CLASS(klass);
3725 PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);
3727 dc->reset = vfio_pci_reset;
3728 dc->props = vfio_pci_dev_properties;
3729 dc->vmsd = &vfio_pci_vmstate;
3730 dc->desc = "VFIO-based PCI device assignment";
3731 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
3732 pdc->init = vfio_initfn;
3733 pdc->exit = vfio_exitfn;
3734 pdc->config_read = vfio_pci_read_config;
3735 pdc->config_write = vfio_pci_write_config;
3736 pdc->is_express = 1; /* We might be */
3739 static const TypeInfo vfio_pci_dev_info = {
3741 .parent = TYPE_PCI_DEVICE,
3742 .instance_size = sizeof(VFIODevice),
3743 .class_init = vfio_pci_dev_class_init,
3746 static void register_vfio_pci_dev_type(void)
3748 type_register_static(&vfio_pci_dev_info);
3751 type_init(register_vfio_pci_dev_type)