diff --git a/bolt_windows.go b/bolt_windows.go
index f810ac313..e99a0d621 100644
--- a/bolt_windows.go
+++ b/bolt_windows.go
@@ -68,19 +68,6 @@ func mmap(db *DB, sz int) error {
 	var sizelo, sizehi uint32
 
 	if !db.readOnly {
-		if db.MaxSize > 0 && sz > db.MaxSize {
-			// The max size only limits future writes; however, we don’t block opening
-			// and mapping the database if it already exceeds the limit.
-			fileSize, err := db.fileSize()
-			if err != nil {
-				return fmt.Errorf("could not check existing db file size: %s", err)
-			}
-
-			if sz > fileSize {
-				return errors.ErrMaxSizeReached
-			}
-		}
-
 		// Truncate the database to the size of the mmap.
 		if err := db.file.Truncate(int64(sz)); err != nil {
 			return fmt.Errorf("truncate: %s", err)
diff --git a/db.go b/db.go
index 5d3e26496..12a1a00b2 100644
--- a/db.go
+++ b/db.go
@@ -1166,13 +1166,26 @@ func (db *DB) allocate(txid common.Txid, count int) (*common.Page, error) {
 	p.SetId(db.rwtx.meta.Pgid())
 	var minsz = int((p.Id()+common.Pgid(count))+1) * db.pageSize
 	if minsz >= db.datasz {
-		if err := db.mmap(minsz); err != nil {
-			if err == berrors.ErrMaxSizeReached {
-				return nil, err
+		if db.MaxSize > 0 {
+			// this calculation matches the calculation in grow
+			// however, I don't quite understand it. Why is the allocation increment added to the size required,
+			// rather than the size required rounded up to the next multiple of the allocation increment?
+			nextAllocSize := minsz
+			if nextAllocSize < db.AllocSize {
+				nextAllocSize = db.AllocSize
 			} else {
-				return nil, fmt.Errorf("mmap allocate error: %s", err)
+				nextAllocSize += db.AllocSize
 			}
+
+			if nextAllocSize > db.MaxSize {
+				db.Logger().Errorf("[GOOS: %s, GOARCH: %s] maximum db size reached, size: %d, db.MaxSize: %d", runtime.GOOS, runtime.GOARCH, minsz, db.MaxSize)
+				return nil, berrors.ErrMaxSizeReached
+			}
 		}
+
+		if err := db.mmap(minsz); err != nil {
+			return nil, fmt.Errorf("mmap allocate error: %s", err)
+		}
 	}
 
 	// Move the page id high water mark.
@@ -1203,11 +1216,6 @@ func (db *DB) grow(sz int) error {
 		sz += db.AllocSize
 	}
 
-	if !db.readOnly && db.MaxSize > 0 && sz > db.MaxSize {
-		lg.Errorf("[GOOS: %s, GOARCH: %s] maximum db size reached, size: %d, db.MaxSize: %d", runtime.GOOS, runtime.GOARCH, sz, db.MaxSize)
-		return berrors.ErrMaxSizeReached
-	}
-
 	// Truncate and fsync to ensure file size metadata is flushed.
 	// https://github.com/boltdb/bolt/issues/284
 	if !db.NoGrowSync && !db.readOnly {
diff --git a/db_test.go b/db_test.go
index feb3368ce..07068cb28 100644
--- a/db_test.go
+++ b/db_test.go
@@ -1483,11 +1483,11 @@ func TestDBUnmap(t *testing.T) {
 	db.DB = nil
 }
 
-// Convenience function for inserting a bunch of keys with 1000 byte values
-func fillDBWithKeys(db *btesting.DB, numKeys int) error {
+// Convenience function for inserting a bunch of keys with values of a specific size (in bytes)
+func fillDBWithKeys(db *btesting.DB, numKeys, valueSize int) error {
 	return db.Fill([]byte("data"), 1, numKeys,
 		func(tx int, k int) []byte { return []byte(fmt.Sprintf("%04d", k)) },
-		func(tx int, k int) []byte { return make([]byte, 1000) },
+		func(tx int, k int) []byte { return make([]byte, valueSize) },
 	)
 }
 
@@ -1543,15 +1543,19 @@ func TestDB_MaxSizeNotExceeded(t *testing.T) {
 		path := db.Path()
 
 		// The data file should be 4 MiB now (expanded once from zero).
-		// It should have space for roughly 16 more entries before trying to grow
-		// Keep inserting until grow is required
-		err := fillDBWithKeys(db, 100)
+		// It should have space for roughly one more entry with value size 100kB before trying to grow
+		// This next insert should be too big
+		err := fillDBWithKeys(db, 2, 100000)
 		assert.ErrorIs(t, err, berrors.ErrMaxSizeReached)
 
 		newSz := fileSize(path)
 		require.Greater(t, newSz, int64(0), "unexpected new file size: %d", newSz)
 		assert.LessOrEqual(t, newSz, int64(db.MaxSize), "The size of the data file should not exceed db.MaxSize")
 
+		// Now try another write that shouldn't increase the max size
+		err = fillDBWithKeys(db, 1, 1)
+		assert.NoError(t, err, "Adding an entry after a failed, oversized write should not error")
+
 		err = db.Close()
 		require.NoError(t, err, "Closing the re-opened database should succeed")
 	})
@@ -1567,7 +1571,7 @@ func TestDB_MaxSizeExceededCanOpen(t *testing.T) {
 	path := db.Path()
 
 	// Insert a reasonable amount of data below the max size.
-	err := fillDBWithKeys(db, 2000)
+	err := fillDBWithKeys(db, 2000, 1000)
 	require.NoError(t, err, "fillDbWithKeys should succeed")
 
 	err = db.Close()
@@ -1625,7 +1629,7 @@
 }
 
 // Ensure that when InitialMmapSize is above the limit, opening a database
-// that is beyond the maximum size fails in Windows.
+// that is beyond the maximum size does not grow the db on Windows.
 // In Windows, the file must be expanded to the mmap initial size.
 // https://github.com/etcd-io/bbolt/issues/928
 func TestDB_MaxSizeExceededDoesNotGrow(t *testing.T) {
@@ -1643,17 +1647,23 @@ func TestDB_MaxSizeExceededDoesNotGrow(t *testing.T) {
 
 	// The data file should be 4 MiB now (expanded once from zero).
 	minimumSizeForTest := int64(1024 * 1024)
-	newSz := fileSize(path)
-	assert.GreaterOrEqual(t, newSz, minimumSizeForTest, "unexpected new file size: %d. Expected at least %d", newSz, minimumSizeForTest)
+	expandedSize := fileSize(path)
+	assert.GreaterOrEqual(t, expandedSize, minimumSizeForTest, "unexpected new file size: %d. Expected at least %d", expandedSize, minimumSizeForTest)
 
 	// Now try to re-open the database with an extremely small max size and
 	// an initial mmap size to be greater than the actual file size, forcing an illegal grow on open
 	t.Logf("Reopening bbolt DB at: %s", path)
-	_, err = btesting.OpenDBWithOption(t, path, &bolt.Options{
+	db, err = btesting.OpenDBWithOption(t, path, &bolt.Options{
 		MaxSize:         1,
-		InitialMmapSize: int(newSz) * 2,
+		InitialMmapSize: int(expandedSize) * 2,
 	})
-	assert.Error(t, err, "Opening the DB with InitialMmapSize > MaxSize should cause an error on Windows")
+	require.NoError(t, err, "Opening the DB with InitialMmapSize > MaxSize should not cause an error, because it is not a write operation")
+
+	db.MustClose()
+
+	newSize := fileSize(path)
+
+	assert.Equal(t, expandedSize, newSize, "Expected the file size to stay the same when MaxSize set impossibly low")
 }
 
 func TestDB_HugeValue(t *testing.T) {