
Commit f8d59df

Vladimir Sementsov-Ogievskiy authored and kevmw committed
block/backup: fix fleecing scheme: use serialized writes
The fleecing scheme works as follows: we want a kind of temporary snapshot of the active drive A. We create a temporary image B with B->backing = A, then start backup(sync=none) from A to B. From this point on, B reads as a point-in-time snapshot of A (A remains the active drive and keeps accepting guest I/O).

This scheme needs additional synchronization between reads from B and backup COW operations; otherwise the following situation is theoretically possible (assume B is qcow2 and the client is an NBD client reading from B):

1. The client starts reading, takes the qcow2 mutex in qcow2_co_preadv, and gets as far as L2 table loading (assume a cache miss).
2. A guest write arrives => backup COW => qcow2 write => it tries to take the qcow2 mutex => it waits.
3. The L2 table is loaded; we see that the cluster is UNALLOCATED, go to "case QCOW2_CLUSTER_UNALLOCATED", and unlock the mutex before bdrv_co_preadv(bs->backing, ...).
4. With the mutex unlocked, the backup COW continues, the guest write finally finishes, and the cluster in the active disk A is changed.
5. Only now do we actually perform bdrv_co_preadv(bs->backing, ...) and read the _new, updated_ data.

To avoid this, make backup writes serializing, so that they cannot intersect with reads from B.

Note: we expand the range of handled cases from (sync=none and B->backing = A) to just (A in the backing chain of B), to finally allow safe reading from B during backup in all cases where A is in the backing chain of B, i.e. whenever B formally looks like a point-in-time snapshot of A.

Signed-off-by: Vladimir Sementsov-Ogievskiy <[email protected]>
Reviewed-by: Fam Zheng <[email protected]>
Signed-off-by: Kevin Wolf <[email protected]>
1 parent 09d2f94 commit f8d59df
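To make the detection rule concrete (per the note above, writes are serialized whenever the source is anywhere in the target's backing chain, not only when B->backing = A), here is a small stand-alone toy model. Node, chain_contains(), REQ_SERIALISING and the example node names are invented stand-ins for QEMU's BlockDriverState, bdrv_chain_contains() and BDRV_REQ_SERIALISING; it only illustrates the rule and is not QEMU code.

/* Toy model of the flag-selection rule added by this commit. */
#include <stdbool.h>
#include <stdio.h>

#define REQ_SERIALISING 0x1   /* stand-in for BDRV_REQ_SERIALISING */

typedef struct Node {
    const char *name;
    struct Node *backing;     /* backing image, or NULL */
} Node;

/* True iff 'base' is reachable from 'top' via backing links (top included),
 * mirroring what bdrv_chain_contains(target, bs) checks in the patch. */
static bool chain_contains(Node *top, Node *base)
{
    for (; top; top = top->backing) {
        if (top == base) {
            return true;
        }
    }
    return false;
}

int main(void)
{
    Node a = { "B (fleecing overlay, backed by A)", NULL };
    Node b = { "B (fleecing overlay, backed by A)", &a };
    Node t = { "T (ordinary backup target)", NULL };

    a.name = "A (active drive)";

    /* Fleecing: the source A is in the backing chain of the target B,
     * so COW writes to B must be serializing. */
    int fleecing_flags = chain_contains(&b, &a) ? REQ_SERIALISING : 0;

    /* Regular backup: the source is not behind the target, nothing changes. */
    int regular_flags = chain_contains(&t, &a) ? REQ_SERIALISING : 0;

    printf("backup to %s: write_flags = %#x\n", b.name, fleecing_flags); /* 0x1 */
    printf("backup to %s: write_flags = %#x\n", t.name, regular_flags);  /* 0 */
    return 0;
}

In the patch itself, the resulting write_flags value is OR-ed into the target writes in blk_co_pwrite_zeroes(), blk_co_pwritev() and blk_co_copy_range(), as the diff below shows.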

File tree

1 file changed, +14 -6 lines changed


block/backup.c

+14 -6
@@ -47,6 +47,8 @@ typedef struct BackupBlockJob {
     HBitmap *copy_bitmap;
     bool use_copy_range;
     int64_t copy_range_size;
+
+    bool serialize_target_writes;
 } BackupBlockJob;
 
 static const BlockJobDriver backup_job_driver;
@@ -102,6 +104,8 @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
     QEMUIOVector qiov;
     BlockBackend *blk = job->common.blk;
     int nbytes;
+    int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
+    int write_flags = job->serialize_target_writes ? BDRV_REQ_SERIALISING : 0;
 
     hbitmap_reset(job->copy_bitmap, start / job->cluster_size, 1);
     nbytes = MIN(job->cluster_size, job->len - start);
@@ -112,8 +116,7 @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
     iov.iov_len = nbytes;
     qemu_iovec_init_external(&qiov, &iov, 1);
 
-    ret = blk_co_preadv(blk, start, qiov.size, &qiov,
-                        is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0);
+    ret = blk_co_preadv(blk, start, qiov.size, &qiov, read_flags);
     if (ret < 0) {
         trace_backup_do_cow_read_fail(job, start, ret);
         if (error_is_read) {
@@ -124,11 +127,11 @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
 
     if (qemu_iovec_is_zero(&qiov)) {
         ret = blk_co_pwrite_zeroes(job->target, start,
-                                   qiov.size, BDRV_REQ_MAY_UNMAP);
+                                   qiov.size, write_flags | BDRV_REQ_MAY_UNMAP);
     } else {
         ret = blk_co_pwritev(job->target, start,
-                             qiov.size, &qiov,
-                             job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
+                             qiov.size, &qiov, write_flags |
+                             (job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0));
     }
     if (ret < 0) {
         trace_backup_do_cow_write_fail(job, start, ret);
@@ -156,14 +159,16 @@ static int coroutine_fn backup_cow_with_offload(BackupBlockJob *job,
     int nr_clusters;
     BlockBackend *blk = job->common.blk;
     int nbytes;
+    int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
+    int write_flags = job->serialize_target_writes ? BDRV_REQ_SERIALISING : 0;
 
     assert(QEMU_IS_ALIGNED(job->copy_range_size, job->cluster_size));
     nbytes = MIN(job->copy_range_size, end - start);
     nr_clusters = DIV_ROUND_UP(nbytes, job->cluster_size);
     hbitmap_reset(job->copy_bitmap, start / job->cluster_size,
                   nr_clusters);
     ret = blk_co_copy_range(blk, start, job->target, start, nbytes,
-                            is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0, 0);
+                            read_flags, write_flags);
     if (ret < 0) {
         trace_backup_do_cow_copy_range_fail(job, start, ret);
         hbitmap_set(job->copy_bitmap, start / job->cluster_size,
@@ -701,6 +706,9 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
                        sync_bitmap : NULL;
     job->compress = compress;
 
+    /* Detect image-fleecing (and similar) schemes */
+    job->serialize_target_writes = bdrv_chain_contains(target, bs);
+
     /* If there is no backing file on the target, we cannot rely on COW if our
      * backup cluster size is smaller than the target cluster size. Even for
      * targets with a backing file, try to avoid COW if possible. */
