Sophie

Sophie

distrib > Scientific Linux > 5x > x86_64 > by-pkgid > 3160499aacb81f6735941eb4c372d87a > files > 644

kvm-83-164.el5_5.30.src.rpm

From f786ac131f701dbf5e7e66fe929f022036a474bd Mon Sep 17 00:00:00 2001
From: Kevin Wolf <kwolf@redhat.com>
Date: Wed, 27 Jan 2010 17:46:44 -0200
Subject: [PATCH 1/2] virtio-blk: Fix reads turned into writes after read error

RH-Author: Kevin Wolf <kwolf@redhat.com>
Message-id: <1264614405-21152-2-git-send-email-kwolf@redhat.com>
Patchwork-id: 6725
O-Subject: [PATCH 1/2] virtio-blk: Fix reads turned into writes after read error
Bugzilla: 552487
RH-Acked-by: Christoph Hellwig <chellwig@redhat.com>

Originally, virtio_blk_dma_restart_bh had only to handle write errors. Since we
also handle read errors in this way and stop the VM, it has turned reads into
writes, which can lead to image corruption. Fix this by distinguishing reads
and writes.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 qemu/hw/virtio-blk.c |   42 +++++++++++++++++++++++++++---------------
 1 files changed, 27 insertions(+), 15 deletions(-)

Signed-off-by: Glauber Costa <glommer@redhat.com>
---
 qemu/hw/virtio-blk.c |   42 +++++++++++++++++++++++++++---------------
 1 files changed, 27 insertions(+), 15 deletions(-)

diff --git a/qemu/hw/virtio-blk.c b/qemu/hw/virtio-blk.c
index 9866a53..55e8bdd 100644
--- a/qemu/hw/virtio-blk.c
+++ b/qemu/hw/virtio-blk.c
@@ -181,13 +181,33 @@ static int virtio_blk_handle_write(VirtIOBlockReq *req)
     return 0;
 }
 
+static int virtio_blk_handle_read(VirtIOBlockReq *req)
+{
+    int i;
+
+    for (i = 0; i < req->elem.in_num - 1; i++)
+        req->size += req->elem.in_sg[i].iov_len;
+
+    req->buffer = qemu_memalign(512, req->size);
+    if (req->buffer == NULL) {
+        qemu_free(req);
+        return -1;
+    }
+
+    bdrv_aio_read(req->dev->bs, req->out->sector,
+                  req->buffer,
+                  req->size / 512,
+                  virtio_blk_rw_complete,
+                  req);
+    return 0;
+}
+
 static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
 {
     VirtIOBlock *s = to_virtio_blk(vdev);
     VirtIOBlockReq *req;
 
     while ((req = virtio_blk_get_request(s))) {
-        int i;
 
         if (req->elem.out_num < 1 || req->elem.in_num < 1) {
             fprintf(stderr, "virtio-blk missing headers\n");
@@ -216,20 +236,8 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
             if (virtio_blk_handle_write(req) < 0)
                 break;
         } else {
-            for (i = 0; i < req->elem.in_num - 1; i++)
-                req->size += req->elem.in_sg[i].iov_len;
-
-            req->buffer = qemu_memalign(512, req->size);
-            if (req->buffer == NULL) {
-                qemu_free(req);
+            if (virtio_blk_handle_read(req) < 0)
                 break;
-            }
-
-            bdrv_aio_read(s->bs, req->out->sector,
-                          req->buffer,
-                          req->size / 512,
-                          virtio_blk_rw_complete,
-                          req);
         }
     }
     /*
@@ -250,7 +258,11 @@ static void virtio_blk_dma_restart_bh(void *opaque)
     s->rq = NULL;
 
     while (req) {
-        virtio_blk_handle_write(req);
+        if (req->out->type & VIRTIO_BLK_T_OUT) {
+            virtio_blk_handle_write(req);
+        } else {
+            virtio_blk_handle_read(req);
+        }
         req = req->next;
     }
 }
-- 
1.6.5.2