diff options
| author | Eduardo Habkost <ehabkost@redhat.com> | 2009-09-09 17:29:06 -0300 |
|---|---|---|
| committer | Eduardo Habkost <ehabkost@redhat.com> | 2009-09-09 18:02:08 -0300 |
| commit | 1d6b5705a80c58e92bbb82c015c2b3fc173881ca (patch) | |
| tree | a26cb7f22ec5ca1272eda976996220ecf7118d5f | |
| parent | 41f29a5aa47004f244e842385915d002d0ddead0 (diff) | |
Combined patch of two upstream commits (the second fixing the first)
Message-id: <1252527484-19604-4-git-send-email-ehabkost@redhat.com>
RH-Author: Eduardo Habkost <ehabkost@redhat.com>
Patchwork-id: 3385
O-Subject: [PATCH 3/5] Combined patch of two upstream commits (the second fixing the first)
Bugzilla: 520693
Author: Kevin Wolf <kwolf@redhat.com>
Bugzilla: 520693
RH-Acked-by: Juan Quintela <quintela@redhat.com>
RH-Acked-by: Andrea Arcangeli <aarcange@redhat.com>
RH-Acked-by: Gleb Natapov <gleb@redhat.com>
l2_allocate: Write complete sectors
When modifying the L1 table, l2_allocate() needs to write complete sectors
instead of single entries. The L1 table is already in memory, reading it from
disk in the block layer to align the request is wasted performance.
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
---
qcow2: Fix L1 table memory allocation
Contrary to what one could expect, the size of L1 tables is not cluster
aligned. So as we're writing whole sectors now instead of single entries,
we need to ensure that the L1 table in memory is large enough; otherwise
write would access memory after the end of the L1 table.
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
qemu/block-qcow2.c | 53 ++++++++++++++++++++++++++++++++++++++-------------
1 files changed, 39 insertions(+), 14 deletions(-)
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
---
qemu/block-qcow2.c | 53 ++++++++++++++++++++++++++++++++++++++-------------
1 files changed, 39 insertions(+), 14 deletions(-)
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
-rw-r--r-- | qemu/block-qcow2.c | 53 ++++++++++++++++++++++++++++++++++++++------------- |
1 files changed, 39 insertions(+), 14 deletions(-)
diff --git a/qemu/block-qcow2.c b/qemu/block-qcow2.c index 171eb83f..9c8212bf 100644 --- a/qemu/block-qcow2.c +++ b/qemu/block-qcow2.c @@ -183,6 +183,13 @@ static void free_clusters(BlockDriverState *bs, int64_t offset, int64_t size); static int check_refcounts(BlockDriverState *bs); +static inline int64_t align_offset(int64_t offset, int n) +{ + offset = (offset + n - 1) & ~(n - 1); + return offset; +} + + static int qcow_probe(const uint8_t *buf, int buf_size, const char *filename) { const QCowHeader *cow_header = (const void *)buf; @@ -334,7 +341,8 @@ static int qcow_open(BlockDriverState *bs, const char *filename, int flags) if (s->l1_size < s->l1_vm_state_index) goto fail; s->l1_table_offset = header.l1_table_offset; - s->l1_table = qemu_malloc(s->l1_size * sizeof(uint64_t)); + s->l1_table = qemu_mallocz( + align_offset(s->l1_size * sizeof(uint64_t), 512)); if (!s->l1_table) goto fail; if (bdrv_pread(s->hd, s->l1_table_offset, s->l1_table, s->l1_size * sizeof(uint64_t)) != @@ -526,12 +534,6 @@ static inline int l2_cache_new_entry(BlockDriverState *bs) return min_index; } -static int64_t align_offset(int64_t offset, int n) -{ - offset = (offset + n - 1) & ~(n - 1); - return offset; -} - static int grow_l1_table(BlockDriverState *bs, int min_size) { BDRVQcowState *s = bs->opaque; @@ -551,7 +553,7 @@ static int grow_l1_table(BlockDriverState *bs, int min_size) #endif new_l1_size2 = sizeof(uint64_t) * new_l1_size; - new_l1_table = qemu_mallocz(new_l1_size2); + new_l1_table = qemu_mallocz(align_offset(new_l1_size2, 512)); if (!new_l1_table) return -ENOMEM; memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t)); @@ -650,6 +652,31 @@ static uint64_t *l2_load(BlockDriverState *bs, uint64_t l2_offset) } /* + * Writes one sector of the L1 table to the disk (can't update single entries + * and we really don't want bdrv_pread to perform a read-modify-write) + */ +#define L1_ENTRIES_PER_SECTOR (512 / 8) +static int write_l1_entry(BDRVQcowState *s, int l1_index) 
+{ + uint64_t buf[L1_ENTRIES_PER_SECTOR]; + int l1_start_index; + int i; + + l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1); + for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) { + buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]); + } + + if (bdrv_pwrite(s->hd, s->l1_table_offset + 8 * l1_start_index, + buf, sizeof(buf)) != sizeof(buf)) + { + return -1; + } + + return 0; +} + +/* * l2_allocate * * Allocate a new l2 entry in the file. If l1_index points to an already @@ -663,7 +690,7 @@ static uint64_t *l2_allocate(BlockDriverState *bs, int l1_index) { BDRVQcowState *s = bs->opaque; int min_index; - uint64_t old_l2_offset, tmp; + uint64_t old_l2_offset; uint64_t *l2_table, l2_offset; old_l2_offset = s->l1_table[l1_index]; @@ -675,11 +702,9 @@ static uint64_t *l2_allocate(BlockDriverState *bs, int l1_index) /* update the L1 entry */ s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED; - - tmp = cpu_to_be64(l2_offset | QCOW_OFLAG_COPIED); - if (bdrv_pwrite(s->hd, s->l1_table_offset + l1_index * sizeof(tmp), - &tmp, sizeof(tmp)) != sizeof(tmp)) + if (write_l1_entry(s, l1_index) < 0) { return NULL; + } /* allocate a new entry in the l2 cache */ @@ -1829,7 +1854,7 @@ static int update_snapshot_refcount(BlockDriverState *bs, l1_size2 = l1_size * sizeof(uint64_t); l1_allocated = 0; if (l1_table_offset != s->l1_table_offset) { - l1_table = qemu_malloc(l1_size2); + l1_table = qemu_mallocz(align_offset(l1_size2, 512)); if (!l1_table) goto fail; l1_allocated = 1; |