@@ -70,7 +70,6 @@ typedef struct QCowHeader {
 typedef struct BDRVQcowState {
     int cluster_bits;
     int cluster_size;
-    int cluster_sectors;
     int l2_bits;
     int l2_size;
     unsigned int l1_size;
@@ -235,7 +234,6 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags,
     }
     s->cluster_bits = header.cluster_bits;
     s->cluster_size = 1 << s->cluster_bits;
-    s->cluster_sectors = 1 << (s->cluster_bits - 9);
     s->l2_bits = header.l2_bits;
     s->l2_size = 1 << s->l2_bits;
     bs->total_sectors = header.size / 512;
@@ -613,8 +611,18 @@ static int decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
     return 0;
 }
 
-static coroutine_fn int qcow_co_readv(BlockDriverState *bs, int64_t sector_num,
-                                      int nb_sectors, QEMUIOVector *qiov)
+static void qcow_refresh_limits(BlockDriverState *bs, Error **errp)
+{
+    /* At least encrypted images require 512-byte alignment. Apply the
+     * limit universally, rather than just on encrypted images, as
+     * it's easier to let the block layer handle rounding than to
+     * audit this code further. */
+    bs->bl.request_alignment = BDRV_SECTOR_SIZE;
+}
+
+static coroutine_fn int qcow_co_preadv(BlockDriverState *bs, uint64_t offset,
+                                       uint64_t bytes, QEMUIOVector *qiov,
+                                       int flags)
 {
     BDRVQcowState *s = bs->opaque;
     int offset_in_cluster;
@@ -624,9 +632,8 @@ static coroutine_fn int qcow_co_readv(BlockDriverState *bs, int64_t sector_num,
     QEMUIOVector hd_qiov;
     uint8_t *buf;
     void *orig_buf;
-    int64_t offset = sector_num * BDRV_SECTOR_SIZE;
-    int64_t bytes = nb_sectors * BDRV_SECTOR_SIZE;
 
+    assert(!flags);
     if (qiov->niov > 1) {
         buf = orig_buf = qemu_try_blockalign(bs, qiov->size);
         if (buf == NULL) {
@@ -718,9 +725,9 @@ static coroutine_fn int qcow_co_readv(BlockDriverState *bs, int64_t sector_num,
     return ret;
 }
 
-static coroutine_fn int qcow_co_writev(BlockDriverState *bs, int64_t sector_num,
-                                       int nb_sectors, QEMUIOVector *qiov,
-                                       int flags)
+static coroutine_fn int qcow_co_pwritev(BlockDriverState *bs, uint64_t offset,
+                                        uint64_t bytes, QEMUIOVector *qiov,
+                                        int flags)
 {
     BDRVQcowState *s = bs->opaque;
     int offset_in_cluster;
@@ -730,8 +737,6 @@ static coroutine_fn int qcow_co_writev(BlockDriverState *bs, int64_t sector_num,
     QEMUIOVector hd_qiov;
     uint8_t *buf;
     void *orig_buf;
-    int64_t offset = sector_num * BDRV_SECTOR_SIZE;
-    int64_t bytes = nb_sectors * BDRV_SECTOR_SIZE;
 
     assert(!flags);
     s->cluster_cache_offset = -1; /* disable compressed cache */
@@ -1104,8 +1109,7 @@ qcow_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
 
     if (ret != Z_STREAM_END || out_len >= s->cluster_size) {
         /* could not compress: write normal cluster */
-        ret = qcow_co_writev(bs, offset >> BDRV_SECTOR_BITS,
-                             bytes >> BDRV_SECTOR_BITS, qiov, 0);
+        ret = qcow_co_pwritev(bs, offset, bytes, qiov, 0);
         if (ret < 0) {
             goto fail;
         }
@@ -1190,9 +1194,10 @@ static BlockDriver bdrv_qcow = {
     .bdrv_co_create_opts    = qcow_co_create_opts,
     .bdrv_has_zero_init     = bdrv_has_zero_init_1,
     .supports_backing       = true,
+    .bdrv_refresh_limits    = qcow_refresh_limits,
 
-    .bdrv_co_readv          = qcow_co_readv,
-    .bdrv_co_writev         = qcow_co_writev,
+    .bdrv_co_preadv         = qcow_co_preadv,
+    .bdrv_co_pwritev        = qcow_co_pwritev,
     .bdrv_co_block_status   = qcow_co_block_status,
 
     .bdrv_make_empty        = qcow_make_empty,
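
For context, here is a minimal standalone sketch (not part of the patch) of the sector-to-byte conversion that the deleted lines in the old qcow_co_readv/qcow_co_writev wrappers performed, and which the block layer now performs before invoking the byte-based callbacks; the request values are hypothetical.

/* Illustrative only: mirrors the removed "offset = sector_num * BDRV_SECTOR_SIZE"
 * arithmetic. BDRV_SECTOR_SIZE is 512 in QEMU; it is redefined here so the
 * example compiles on its own. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BDRV_SECTOR_SIZE 512

int main(void)
{
    int64_t sector_num = 33;   /* hypothetical sector-based request */
    int nb_sectors = 5;

    /* Same arithmetic as the deleted wrapper code */
    uint64_t offset = sector_num * BDRV_SECTOR_SIZE;   /* 16896 bytes */
    uint64_t bytes = nb_sectors * BDRV_SECTOR_SIZE;    /* 2560 bytes */

    printf("offset=%" PRIu64 " bytes=%" PRIu64 "\n", offset, bytes);
    return 0;
}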