@@ -349,7 +349,7 @@ type Metrics struct {
 		BackingTableCount uint64
 		// The sum of the sizes of the BackingTableCount sstables that are backing virtual tables.
 		BackingTableSize uint64
-		// Compression statistics for sstable data (does not include blob files).
+		// Compression statistics for the current sstables.
 		Compression CompressionMetrics
 
 		// Local file sizes.
@@ -447,9 +447,17 @@ type Metrics struct {
 			ZombieCount uint64
 		}
 
+		// Compression statistics for the current blob files.
 		Compression CompressionMetrics
 	}
 
+	// CompressionCounters are cumulative counters for the number of logical
+	// (uncompressed) bytes that went through compression and decompression.
+	CompressionCounters struct {
+		LogicalBytesCompressed   block.ByLevel[block.ByKind[uint64]]
+		LogicalBytesDecompressed block.ByLevel[block.ByKind[uint64]]
+	}
+
 	FileCache FileCacheMetrics
 
 	// Count of the number of open sstable iterators.
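Note: the new CompressionCounters field exposes the counters through the nested generics block.ByLevel and block.ByKind. A minimal sketch of how a caller could total the compressed side, using only the field names visible in this diff (OtherLevels, L5, L6 on ByLevel; DataBlocks, ValueBlocks, OtherBlocks on ByKind); the helper name is hypothetical, not part of the change:

// totalLogicalBytesCompressed sums the cumulative compression counters
// across all levels and block kinds. Hypothetical helper for illustration.
func totalLogicalBytesCompressed(m *Metrics) uint64 {
	sumKinds := func(k *block.ByKind[uint64]) uint64 {
		return k.DataBlocks + k.ValueBlocks + k.OtherBlocks
	}
	c := &m.CompressionCounters.LogicalBytesCompressed
	return sumKinds(&c.OtherLevels) + sumKinds(&c.L5) + sumKinds(&c.L6)
}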
@@ -500,7 +508,7 @@ type Metrics struct {
 
 // CompressionMetrics contains compression metrics for sstables or blob files.
 type CompressionMetrics struct {
-	// NoCompressionBytes is the total number of bytes in files that do are not
+	// NoCompressionBytes is the total number of bytes in files that are not
 	// compressed. Data can be uncompressed when 1) compression is disabled; 2)
 	// for certain special types of blocks; and 3) for blocks that are not
 	// compressible.
@@ -845,6 +853,16 @@ var (
 		table.Div(),
 		table.String("blob files", 13, table.AlignRight, func(i compressionInfo) string { return i.blobFiles }),
 	)
+	compressionCountersTableHeader = `      | logical bytes compressed / decompressed`
+	compressionCountersTable       = table.Define[compressionCountersInfo](
+		table.String("level", 5, table.AlignRight, func(i compressionCountersInfo) string { return i.level }),
+		table.Div(),
+		table.String("data blocks", 14, table.AlignCenter, func(i compressionCountersInfo) string { return i.DataBlocks }),
+		table.Div(),
+		table.String("value blocks", 14, table.AlignCenter, func(i compressionCountersInfo) string { return i.ValueBlocks }),
+		table.Div(),
+		table.String("other blocks", 14, table.AlignCenter, func(i compressionCountersInfo) string { return i.OtherBlocks }),
+	)
 )
 
 type commitPipelineInfo struct {
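For orientation, the header string and the four columns defined above produce a table shaped roughly like the sketch below. The values are made up and the padding is approximate; the real layout comes from the declared column widths and alignments:

      | logical bytes compressed / decompressed
level |  data blocks   |  value blocks  |  other blocks
L0-L4 |  1.2GB / 3.4GB |  640MB / 2.1GB |   96MB / 310MB
   L6 |   45GB / 12GB  |  18GB / 3.4GB  |  1.1GB / 800MB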
@@ -973,6 +991,34 @@ func makeCompressionInfo(algorithm string, table, blob CompressionStatsForSettin
 	return i
 }
 
+type compressionCountersInfo struct {
+	level string
+	block.ByKind[string]
+}
+
+func makeCompressionCountersInfo(m *Metrics) []compressionCountersInfo {
+	var result []compressionCountersInfo
+	isZero := func(c *block.ByKind[uint64]) bool {
+		return c.DataBlocks == 0 && c.ValueBlocks == 0 && c.OtherBlocks == 0
+	}
+	addLevel := func(level string, compressed, decompressed *block.ByKind[uint64]) {
+		if isZero(compressed) && isZero(decompressed) {
+			return
+		}
+		result = append(result, compressionCountersInfo{
+			level: level,
+			ByKind: block.ByKind[string]{
+				DataBlocks:  humanizeBytes(compressed.DataBlocks) + " / " + humanizeBytes(decompressed.DataBlocks),
+				ValueBlocks: humanizeBytes(compressed.ValueBlocks) + " / " + humanizeBytes(decompressed.ValueBlocks),
+				OtherBlocks: humanizeBytes(compressed.OtherBlocks) + " / " + humanizeBytes(decompressed.OtherBlocks)},
+		})
+	}
+	addLevel("L0-L4", &m.CompressionCounters.LogicalBytesCompressed.OtherLevels, &m.CompressionCounters.LogicalBytesDecompressed.OtherLevels)
+	addLevel("L5", &m.CompressionCounters.LogicalBytesCompressed.L5, &m.CompressionCounters.LogicalBytesDecompressed.L5)
+	addLevel("L6", &m.CompressionCounters.LogicalBytesCompressed.L6, &m.CompressionCounters.LogicalBytesDecompressed.L6)
+	return result
+}
+
 // String pretty-prints the metrics.
 //
 // See testdata/metrics for an example.
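Because addLevel returns early when both counter sets are zero, levels with no compression or decompression activity produce no row at all. An illustrative sketch of that behavior (hypothetical, not part of the change):

// With only an L6 counter set, makeCompressionCountersInfo returns a
// single row; the "L0-L4" and "L5" rows are filtered out as all-zero.
var m Metrics
m.CompressionCounters.LogicalBytesCompressed.L6.DataBlocks = 1 << 20
rows := makeCompressionCountersInfo(&m)
// len(rows) == 1 && rows[0].level == "L6"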
@@ -1160,7 +1206,11 @@ func (m *Metrics) String() string {
 	compressionContents = slices.DeleteFunc(compressionContents, func(i compressionInfo) bool {
 		return i.tables == "" && i.blobFiles == ""
 	})
-	compressionTable.Render(cur, table.RenderOptions{}, compressionContents...)
+	cur = compressionTable.Render(cur, table.RenderOptions{}, compressionContents...)
+
+	cur = cur.NewlineReturn()
+	cur = cur.WriteString(compressionCountersTableHeader).NewlineReturn()
+	compressionCountersTable.Render(cur, table.RenderOptions{}, makeCompressionCountersInfo(m)...)
 
 	return wb.String()
 }
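End to end, the counters table is appended to the regular metrics dump, so it shows up at existing call sites with no API change. A minimal usage sketch, assuming db is an open *pebble.DB:

// Print the full metrics block, including the new
// "logical bytes compressed / decompressed" section.
m := db.Metrics()
fmt.Println(m.String())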
@@ -1190,8 +1240,8 @@ func (m *Metrics) StringForTests() string {
 
 	// We recalculate the file cache size using the 64-bit sizes, and we ignore
 	// the genericcache metadata size which is harder to adjust.
-	const sstableReaderSize64bit = 280
-	const blobFileReaderSize64bit = 112
+	const sstableReaderSize64bit = 288
+	const blobFileReaderSize64bit = 120
 	mCopy.FileCache.Size = mCopy.FileCache.TableCount*sstableReaderSize64bit + mCopy.FileCache.BlobFileCount*blobFileReaderSize64bit
 	if math.MaxInt == math.MaxInt64 {
 		// Verify the 64-bit sizes, so they are kept updated.
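Both reader-size constants grow by 8 bytes here (280 to 288 and 112 to 120), consistent with each reader gaining one pointer-sized field on 64-bit platforms. As a worked check of the recalculation above (the counts are made up): with TableCount = 10 and BlobFileCount = 4, FileCache.Size = 10*288 + 4*120 = 3360 bytes.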