Sophie

Sophie

distrib > Scientific%20Linux > 5x > x86_64 > by-pkgid > 340e01248478ba8b78a6d4d1809b1eff > files > 206

kvm-83-270.el5_11.src.rpm

From 1d75f37c81289dad84a0f5e2b52a8c81810d405b Mon Sep 17 00:00:00 2001
From: Juan Quintela <quintela@redhat.com>
Date: Mon, 20 Jun 2011 11:42:14 -0300
Subject: [PATCH] Revert "Make migration robust to trashing workloads."

RH-Author: Juan Quintela <quintela@redhat.com>
Message-id: <1308570134-1670-1-git-send-email-quintela@redhat.com>
Patchwork-id: 27463
O-Subject: [RHEL 5.7 PATCH] Revert "Make migration robust to trashing workloads."
Bugzilla: 713392
RH-Acked-by: Alex Williamson <alex.williamson@redhat.com>
RH-Acked-by: Marcelo Tosatti <mtosatti@redhat.com>
RH-Acked-by: Glauber Costa <glommer@redhat.com>

This reverts commit e0d933426ad93babd4b9ea46bac7c013ae0e9985.

This patch had a bug: we were ending stage 2 even if we had not called
kvm_update_dirty_pages_log() on that round.  That means we could end
stage 2 while there were still plenty of pages left to migrate, causing
big stalls.

Please, when acking this patch, consider it for 5.6.z.

brew build: https://brewweb.devel.redhat.com/taskinfo?taskID=3412433
Bugzilla: 713392
Signed-off-by: Juan Quintela <quintela@redhat.com>

---
 qemu/vl.c |   43 ++++++++++++++++++-------------------------
 1 files changed, 18 insertions(+), 25 deletions(-)

Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
---
 qemu/vl.c |   43 ++++++++++++++++++-------------------------
 1 files changed, 18 insertions(+), 25 deletions(-)

diff --git a/qemu/vl.c b/qemu/vl.c
index 781f11c..3e9503f 100644
--- a/qemu/vl.c
+++ b/qemu/vl.c
@@ -3485,7 +3485,6 @@ static int ram_save_live(QEMUFile *f, int stage, void *opaque)
     ram_addr_t addr;
     double bwidth = 0;
     int i;
-    uint64_t target_size;
 
     if (stage == 1) {
         /* Make sure all dirty bits are set */
@@ -3500,27 +3499,6 @@ static int ram_save_live(QEMUFile *f, int stage, void *opaque)
         qemu_put_be64(f, phys_ram_size | RAM_SAVE_FLAG_MEM_SIZE);
     }
 
-    if (stage == 2) {
-        /* We have setup the target as 100ms.  a gigabit ethernet can
-           write at ~ 100MB/s, so in 100ms it can write ~10MB.
-
-           100ms (in ns) / 10 / TARGET_PAGE_SIZE = 2441 pages (~9.5MB)
-
-           For upstream we need a better solution.
-        */
-
-        target_size = migrate_max_downtime() / 10/ TARGET_PAGE_SIZE;
-
-        if (ram_save_remaining () <= target_size) {
-            int r;
-            if (kvm_enabled() && (r = kvm_update_dirty_pages_log())) {
-                printf("%s: update dirty pages log failed %d\n", __FUNCTION__, r);
-                qemu_file_set_has_error(f);
-                return 0;
-            }
-        }
-    }
-
     bwidth = get_clock();
 
     i = 0;
@@ -3554,10 +3532,25 @@ static int ram_save_live(QEMUFile *f, int stage, void *opaque)
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
 
     if (stage == 2) {
-        target_size = migrate_max_downtime() / 10/ TARGET_PAGE_SIZE;
-        return ram_save_remaining () <= target_size;
-    }
+        /* We have setup the target as 100ms.  a gigabit ethernet can
+           write at ~ 100MB/s, so in 100ms it can write ~10MB.
+
+           100ms (in ns) / 10 / TARGET_PAGE_SIZE = 2441 pages (~9.5MB)
+
+           For upstream we need a better solution.
+        */
+        uint64_t target_size = migrate_max_downtime() / 10/ TARGET_PAGE_SIZE;
 
+        if (ram_save_remaining () <= target_size) {
+            int r;
+            if (kvm_enabled() && (r = kvm_update_dirty_pages_log())) {
+                printf("%s: update dirty pages log failed %d\n", __FUNCTION__, r);
+                qemu_file_set_has_error(f);
+                return 0;
+            }
+            return ram_save_remaining () <= target_size;
+        }
+    }
     return 0;
 }
 
-- 
1.7.3.2