block: add support for draining throttled requests
author     Zhi Yong Wu <wuzhy@linux.vnet.ibm.com>
           Thu, 12 Apr 2012 12:00:57 +0000 (14:00 +0200)
committer  Kevin Wolf <kwolf@redhat.com>
           Thu, 10 May 2012 08:32:11 +0000 (10:32 +0200)
Signed-off-by: Zhi Yong Wu <wuzhy@linux.vnet.ibm.com>
[ Iterate until all block devices have processed all requests,
  add comments. - Paolo ]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>

diff --git a/block.c b/block.c
index ee7d8f220f4aca5ba09dcd7a76e3c88a7104ba37..a307fe177fe004713fbc0c66e6e8b6afd41b6bbc 100644
--- a/block.c
+++ b/block.c
@@ -906,12 +906,31 @@ void bdrv_close_all(void)
  *
  * This function does not flush data to disk, use bdrv_flush_all() for that
  * after calling this function.
+ *
+ * Note that completion of an asynchronous I/O operation can trigger any
+ * number of other I/O operations on other devices---for example a coroutine
+ * can be arbitrarily complex and a constant flow of I/O can come until the
+ * coroutine is complete.  Because of this, it is not possible to have a
+ * function to drain a single device's I/O queue.
  */
 void bdrv_drain_all(void)
 {
     BlockDriverState *bs;
+    bool busy;
 
-    qemu_aio_flush();
+    do {
+        busy = qemu_aio_wait();
+
+        /* FIXME: We do not have timer support here, so this is effectively
+         * a busy wait.
+         */
+        QTAILQ_FOREACH(bs, &bdrv_states, list) {
+            if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
+                qemu_co_queue_restart_all(&bs->throttled_reqs);
+                busy = true;
+            }
+        }
+    } while (busy);
 
     /* If requests are still pending there is a bug somewhere */
     QTAILQ_FOREACH(bs, &bdrv_states, list) {
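
A minimal usage sketch, not part of the commit: as the new comment above notes, bdrv_drain_all() only waits for pending and throttled requests and does not flush data to disk, so a caller that needs data on stable storage would pair it with bdrv_flush_all(). The helper name below is hypothetical.

#include "block.h"   /* QEMU block layer API; header path assumed for this era of the tree */

/* Hypothetical helper, for illustration only: quiesce every block device,
 * then push completed writes out to stable storage.
 */
static void quiesce_all_block_devices(void)
{
    /* Wait until in-flight and throttled requests on all devices have
     * completed, including any I/O triggered by their completions. */
    bdrv_drain_all();

    /* bdrv_drain_all() does not flush data to disk, so flush explicitly. */
    bdrv_flush_all();
}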