From 96fc99294e60ca0806e0c579694b026cd44d91b3 Mon Sep 17 00:00:00 2001 From: Austin Larson <78000745+alarso16@users.noreply.github.com> Date: Mon, 29 Sep 2025 15:58:08 -0400 Subject: [PATCH 01/46] style: forbidigo t.Fatal (#1279) --- .avalanche-golangci.yml | 12 +- core/blockchain_ext_test.go | 669 ++++++------------ core/extstate/database_test.go | 2 +- network/network_test.go | 9 +- .../customheader/dynamic_fee_windower_test.go | 6 +- plugin/evm/customtypes/header_ext_test.go | 7 +- plugin/evm/gossiper_eth_gossiping_test.go | 2 +- sync/client/client_test.go | 235 ++---- sync/handlers/block_request_test.go | 47 +- sync/handlers/code_request_test.go | 9 +- sync/handlers/leafs_request_test.go | 41 +- sync/statesync/statesynctest/test_trie.go | 67 +- 12 files changed, 341 insertions(+), 765 deletions(-) diff --git a/.avalanche-golangci.yml b/.avalanche-golangci.yml index 4c7c032228..c6a27ebf0a 100644 --- a/.avalanche-golangci.yml +++ b/.avalanche-golangci.yml @@ -57,7 +57,7 @@ linters: # - depguard # - errcheck - errorlint - # - forbidigo + - forbidigo - goconst - gocritic - goprintffuncname @@ -104,12 +104,12 @@ linters: forbidigo: # Forbid the following identifiers (list of regexp). forbid: - - pattern: require\.Error$(# ErrorIs should be used instead)? - - pattern: require\.ErrorContains$(# ErrorIs should be used instead)? - - pattern: require\.EqualValues$(# Equal should be used instead)? - - pattern: require\.NotEqualValues$(# NotEqual should be used instead)? + # - pattern: require\.Error$(# ErrorIs should be used instead)? + # - pattern: require\.ErrorContains$(# ErrorIs should be used instead)? + # - pattern: require\.EqualValues$(# Equal should be used instead)? + # - pattern: require\.NotEqualValues$(# NotEqual should be used instead)? - pattern: ^(t|b|tb|f)\.(Fatal|Fatalf|Error|Errorf)$(# the require library should be used instead)? - - pattern: ^sort\.(Slice|Strings)$(# the slices package should be used instead)? 
+ # - pattern: ^sort\.(Slice|Strings)$(# the slices package should be used instead)? # Exclude godoc examples from forbidigo checks. exclude-godoc-examples: false gosec: diff --git a/core/blockchain_ext_test.go b/core/blockchain_ext_test.go index 6fa9694f25..32351f05af 100644 --- a/core/blockchain_ext_test.go +++ b/core/blockchain_ext_test.go @@ -67,7 +67,7 @@ var tests = []ChainTest{ }, { "EmptyBlocks", - EmptyBlocks, + EmptyBlocksTest, }, { "EmptyAndNonEmptyBlocks", @@ -179,85 +179,58 @@ func checkBlockChainState( checkState func(sdb *state.StateDB) error, ) (*BlockChain, *BlockChain) { var ( + require = require.New(t) lastAcceptedBlock = bc.LastConsensusAcceptedBlock() newDB = rawdb.NewMemoryDatabase() ) acceptedState, err := bc.StateAt(lastAcceptedBlock.Root()) - if err != nil { - t.Fatal(err) - } - if err := checkState(acceptedState); err != nil { - t.Fatalf("Check state failed for original blockchain due to: %s", err) - } + require.NoError(err) + require.NoError(checkState(acceptedState), "Check state failed for original blockchain") oldChainDataDir := bc.CacheConfig().ChainDataDir // cacheConfig uses same reference in most tests newBlockChain, err := create(newDB, gspec, common.Hash{}, t.TempDir()) - if err != nil { - t.Fatalf("Failed to create new blockchain instance: %s", err) - } + require.NoError(err, "Failed to create new blockchain instance") defer newBlockChain.Stop() for i := uint64(1); i <= lastAcceptedBlock.NumberU64(); i++ { block := bc.GetBlockByNumber(i) - if block == nil { - t.Fatalf("Failed to retrieve block by number %d from original chain", i) - } - if err := newBlockChain.InsertBlock(block); err != nil { - t.Fatalf("Failed to insert block %s:%d due to %s", block.Hash().Hex(), block.NumberU64(), err) - } - if err := newBlockChain.Accept(block); err != nil { - t.Fatalf("Failed to accept block %s:%d due to %s", block.Hash().Hex(), block.NumberU64(), err) - } + require.NotNilf(block, "Failed to retrieve block by number %d from original chain", 
i) + require.NoErrorf(newBlockChain.InsertBlock(block), "Failed to insert block %s:%d", block.Hash().Hex(), block.NumberU64()) + require.NoErrorf(newBlockChain.Accept(block), "Failed to accept block %s:%d", block.Hash().Hex(), block.NumberU64()) } newBlockChain.DrainAcceptorQueue() newLastAcceptedBlock := newBlockChain.LastConsensusAcceptedBlock() - if newLastAcceptedBlock.Hash() != lastAcceptedBlock.Hash() { - t.Fatalf("Expected new blockchain to have last accepted block %s:%d, but found %s:%d", lastAcceptedBlock.Hash().Hex(), lastAcceptedBlock.NumberU64(), newLastAcceptedBlock.Hash().Hex(), newLastAcceptedBlock.NumberU64()) - } + require.Equal(lastAcceptedBlock.Hash(), newLastAcceptedBlock.Hash()) // Check that the state of [newBlockChain] passes the check acceptedState, err = newBlockChain.StateAt(lastAcceptedBlock.Root()) - if err != nil { - t.Fatal(err) - } - if err := checkState(acceptedState); err != nil { - t.Fatalf("Check state failed for newly generated blockchain due to: %s", err) - } + require.NoError(err) + require.NoErrorf(checkState(acceptedState), "Check state failed for newly generated blockchain") // Copy the database over to prevent any issues when re-using [originalDB] after this call. 
originalDB, err = copyMemDB(originalDB) - if err != nil { - t.Fatal(err) - } - newChainDataDir := copyFlatDir(t, oldChainDataDir) - restartedChain, err := create(originalDB, gspec, lastAcceptedBlock.Hash(), newChainDataDir) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + restartedChain, err := create(originalDB, gspec, lastAcceptedBlock.Hash(), oldChainDataDir) + require.NoError(err) defer restartedChain.Stop() - if currentBlock := restartedChain.CurrentBlock(); currentBlock.Hash() != lastAcceptedBlock.Hash() { - t.Fatalf("Expected restarted chain to have current block %s:%d, but found %s:%d", lastAcceptedBlock.Hash().Hex(), lastAcceptedBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) - } - if restartedLastAcceptedBlock := restartedChain.LastConsensusAcceptedBlock(); restartedLastAcceptedBlock.Hash() != lastAcceptedBlock.Hash() { - t.Fatalf("Expected restarted chain to have current block %s:%d, but found %s:%d", lastAcceptedBlock.Hash().Hex(), lastAcceptedBlock.NumberU64(), restartedLastAcceptedBlock.Hash().Hex(), restartedLastAcceptedBlock.NumberU64()) - } + currentBlock := restartedChain.CurrentBlock() + require.Equal(lastAcceptedBlock.Hash(), currentBlock.Hash(), "Restarted chain's current block does not match last accepted block") + restartedLastAcceptedBlock := restartedChain.LastConsensusAcceptedBlock() + require.Equal(lastAcceptedBlock.Hash(), restartedLastAcceptedBlock.Hash(), "Restarted chain's last accepted block does not match last accepted block") // Check that the state of [restartedChain] passes the check acceptedState, err = restartedChain.StateAt(lastAcceptedBlock.Root()) - if err != nil { - t.Fatal(err) - } - if err := checkState(acceptedState); err != nil { - t.Fatalf("Check state failed for restarted blockchain due to: %s", err) - } + require.NoError(err) + require.NoError(checkState(acceptedState), "Check state failed for restarted blockchain") return newBlockChain, restartedChain } func 
InsertChainAcceptSingleBlock(t *testing.T, create createFunc) { var ( + require = require.New(t) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) @@ -272,10 +245,8 @@ func InsertChainAcceptSingleBlock(t *testing.T, create createFunc) { Alloc: types.GenesisAlloc{addr1: {Balance: genesisBalance}}, } blockchain, err := create(chainDB, gspec, common.Hash{}, t.TempDir()) - if err != nil { - t.Fatal(err) - } - defer blockchain.Stop() + require.NoError(err) + t.Cleanup(blockchain.Stop) // This call generates a chain of 3 blocks. signer := types.HomesteadSigner{} @@ -283,17 +254,12 @@ func InsertChainAcceptSingleBlock(t *testing.T, create createFunc) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), ethparams.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Insert three blocks into the chain and accept only the first block. 
- if _, err := blockchain.InsertChain(chain); err != nil { - t.Fatal(err) - } - if err := blockchain.Accept(chain[0]); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain) + require.NoError(err) + require.NoError(blockchain.Accept(chain[0])) blockchain.DrainAcceptorQueue() // check the state of the last accepted block @@ -328,6 +294,7 @@ func InsertChainAcceptSingleBlock(t *testing.T, create createFunc) { func InsertLongForkedChain(t *testing.T, create createFunc) { var ( + require = require.New(t) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) @@ -343,10 +310,8 @@ func InsertLongForkedChain(t *testing.T, create createFunc) { } blockchain, err := create(chainDB, gspec, common.Hash{}, t.TempDir()) - if err != nil { - t.Fatal(err) - } - defer blockchain.Stop() + require.NoError(err) + t.Cleanup(blockchain.Stop) numBlocks := 129 signer := types.HomesteadSigner{} @@ -355,9 +320,7 @@ func InsertLongForkedChain(t *testing.T, create createFunc) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), ethparams.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Generate the forked chain to be longer than the original chain to check for a regression where // a longer chain can trigger a reorg. 
_, chain2, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, numBlocks+1, 10, func(_ int, gen *BlockGen) { @@ -365,103 +328,74 @@ func InsertLongForkedChain(t *testing.T, create createFunc) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(5000), ethparams.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) if blockchain.snaps != nil { - if want, got := 1, blockchain.snaps.NumBlockLayers(); got != want { - t.Fatalf("incorrect snapshot layer count; got %d, want %d", got, want) - } + got := blockchain.snaps.NumBlockLayers() + require.Equal(1, got, "incorrect snapshot layer count") } // Insert both chains. - if _, err := blockchain.InsertChain(chain1); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain1) + require.NoError(err) if blockchain.snaps != nil { - if want, got := 1+len(chain1), blockchain.snaps.NumBlockLayers(); got != want { - t.Fatalf("incorrect snapshot layer count; got %d, want %d", got, want) - } + got := blockchain.snaps.NumBlockLayers() + require.Equal(1+len(chain1), got, "incorrect snapshot layer count") } - if _, err := blockchain.InsertChain(chain2); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain2) + require.NoError(err) if blockchain.snaps != nil { - if want, got := 1+len(chain1)+len(chain2), blockchain.snaps.NumBlockLayers(); got != want { - t.Fatalf("incorrect snapshot layer count; got %d, want %d", got, want) - } + got := blockchain.snaps.NumBlockLayers() + require.Equal(1+len(chain1)+len(chain2), got, "incorrect snapshot layer count") } currentBlock := blockchain.CurrentBlock() expectedCurrentBlock := chain1[len(chain1)-1] - if currentBlock.Hash() != expectedCurrentBlock.Hash() { - t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) - } + 
require.Equal(expectedCurrentBlock.Hash(), currentBlock.Hash()) - if err := blockchain.ValidateCanonicalChain(); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.ValidateCanonicalChain()) // Accept the first block in [chain1], reject all blocks in [chain2] to // mimic the order that the consensus engine will call Accept/Reject in // and then Accept the rest of the blocks in [chain1]. - if err := blockchain.Accept(chain1[0]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(chain1[0])) blockchain.DrainAcceptorQueue() if blockchain.snaps != nil { - // Snap layer count should be 1 fewer - if want, got := len(chain1)+len(chain2), blockchain.snaps.NumBlockLayers(); got != want { - t.Fatalf("incorrect snapshot layer count; got %d, want %d", got, want) - } + // Snap layer count should match chain length + require.Equal(len(chain1)+len(chain2), blockchain.snaps.NumBlockLayers(), "incorrect snapshot layer count") } for i := 0; i < len(chain2); i++ { - if err := blockchain.Reject(chain2[i]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Reject(chain2[i])) if blockchain.snaps != nil { // Snap layer count should decrease by 1 per Reject - if want, got := len(chain1)+len(chain2)-i-1, blockchain.snaps.NumBlockLayers(); got != want { - t.Fatalf("incorrect snapshot layer count; got %d, want %d", got, want) - } + require.Equal(len(chain1)+len(chain2)-i-1, blockchain.snaps.NumBlockLayers(), "incorrect snapshot layer count") } } if blockchain.snaps != nil { - if want, got := len(chain1), blockchain.snaps.NumBlockLayers(); got != want { - t.Fatalf("incorrect snapshot layer count; got %d, want %d", got, want) - } + require.Equal(len(chain1), blockchain.snaps.NumBlockLayers(), "incorrect snapshot layer count") } for i := 1; i < len(chain1); i++ { - if err := blockchain.Accept(chain1[i]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(chain1[i])) blockchain.DrainAcceptorQueue() if blockchain.snaps != nil { // Snap layer 
count should decrease by 1 per Accept - if want, got := len(chain1)-i, blockchain.snaps.NumBlockLayers(); got != want { - t.Fatalf("incorrect snapshot layer count; got %d, want %d", got, want) - } + require.Equal(len(chain1)-i, blockchain.snaps.NumBlockLayers(), "incorrect snapshot layer count") } } lastAcceptedBlock := blockchain.LastConsensusAcceptedBlock() expectedLastAcceptedBlock := chain1[len(chain1)-1] - if lastAcceptedBlock.Hash() != expectedLastAcceptedBlock.Hash() { - t.Fatalf("Expected last accepted block to be %s:%d, but found %s%d", expectedLastAcceptedBlock.Hash().Hex(), expectedLastAcceptedBlock.NumberU64(), lastAcceptedBlock.Hash().Hex(), lastAcceptedBlock.NumberU64()) - } - if err := blockchain.ValidateCanonicalChain(); err != nil { - t.Fatal(err) - } + require.Equal(expectedLastAcceptedBlock.Hash(), lastAcceptedBlock.Hash()) + require.NoError(blockchain.ValidateCanonicalChain()) // check the state of the last accepted block checkState := func(sdb *state.StateDB) error { @@ -492,6 +426,7 @@ func InsertLongForkedChain(t *testing.T, create createFunc) { func AcceptNonCanonicalBlock(t *testing.T, create createFunc) { var ( + require = require.New(t) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) @@ -507,10 +442,8 @@ func AcceptNonCanonicalBlock(t *testing.T, create createFunc) { } blockchain, err := create(chainDB, gspec, common.Hash{}, t.TempDir()) - if err != nil { - t.Fatal(err) - } - defer blockchain.Stop() + require.NoError(err) + t.Cleanup(blockchain.Stop) numBlocks := 3 signer := types.HomesteadSigner{} @@ -519,58 +452,40 @@ func AcceptNonCanonicalBlock(t *testing.T, create createFunc) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), ethparams.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) - if err != nil { - 
t.Fatal(err) - } + require.NoError(err) _, chain2, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, numBlocks, 10, func(_ int, gen *BlockGen) { // Generate a transaction with a different amount to create a chain of blocks different from [chain1] tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(5000), ethparams.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Insert three blocks into the chain and accept only the first. - if _, err := blockchain.InsertChain(chain1); err != nil { - t.Fatal(err) - } - if _, err := blockchain.InsertChain(chain2); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain1) + require.NoError(err) + _, err = blockchain.InsertChain(chain2) + require.NoError(err) currentBlock := blockchain.CurrentBlock() expectedCurrentBlock := chain1[len(chain1)-1] - if currentBlock.Hash() != expectedCurrentBlock.Hash() { - t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) - } + require.Equal(expectedCurrentBlock.Hash(), currentBlock.Hash()) - if err := blockchain.ValidateCanonicalChain(); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.ValidateCanonicalChain()) // Accept the first block in [chain2], reject all blocks in [chain1] to // mimic the order that the consensus engine will call Accept/Reject in. 
- if err := blockchain.Accept(chain2[0]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(chain2[0])) blockchain.DrainAcceptorQueue() for i := 0; i < len(chain1); i++ { - if err := blockchain.Reject(chain1[i]); err != nil { - t.Fatal(err) - } - require.False(t, blockchain.HasBlock(chain1[i].Hash(), chain1[i].NumberU64())) + require.NoError(blockchain.Reject(chain1[i])) + require.False(blockchain.HasBlock(chain1[i].Hash(), chain1[i].NumberU64())) } lastAcceptedBlock := blockchain.LastConsensusAcceptedBlock() expectedLastAcceptedBlock := chain2[0] - if lastAcceptedBlock.Hash() != expectedLastAcceptedBlock.Hash() { - t.Fatalf("Expected last accepted block to be %s:%d, but found %s%d", expectedLastAcceptedBlock.Hash().Hex(), expectedLastAcceptedBlock.NumberU64(), lastAcceptedBlock.Hash().Hex(), lastAcceptedBlock.NumberU64()) - } - if err := blockchain.ValidateCanonicalChain(); err != nil { - t.Fatal(err) - } + require.Equal(expectedLastAcceptedBlock.Hash(), lastAcceptedBlock.Hash()) + require.NoError(blockchain.ValidateCanonicalChain()) // check the state of the last accepted block checkState := func(sdb *state.StateDB) error { @@ -601,6 +516,7 @@ func AcceptNonCanonicalBlock(t *testing.T, create createFunc) { func SetPreferenceRewind(t *testing.T, create createFunc) { var ( + require = require.New(t) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) @@ -616,10 +532,8 @@ func SetPreferenceRewind(t *testing.T, create createFunc) { } blockchain, err := create(chainDB, gspec, common.Hash{}, t.TempDir()) - if err != nil { - t.Fatal(err) - } - defer blockchain.Stop() + require.NoError(err) + t.Cleanup(blockchain.Stop) numBlocks := 3 signer := types.HomesteadSigner{} @@ -628,45 +542,30 @@ func SetPreferenceRewind(t *testing.T, create createFunc) { tx, _ := 
types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), ethparams.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Insert three blocks into the chain and accept only the first. - if _, err := blockchain.InsertChain(chain); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain) + require.NoError(err) currentBlock := blockchain.CurrentBlock() expectedCurrentBlock := chain[len(chain)-1] - if currentBlock.Hash() != expectedCurrentBlock.Hash() { - t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) - } - - if err := blockchain.ValidateCanonicalChain(); err != nil { - t.Fatal(err) - } + require.Equal(expectedCurrentBlock.Hash(), currentBlock.Hash()) + require.NoError(blockchain.ValidateCanonicalChain()) // SetPreference to an ancestor of the currently preferred block. Test that this unlikely, but possible behavior // is handled correctly. 
- if err := blockchain.SetPreference(chain[0]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.SetPreference(chain[0])) currentBlock = blockchain.CurrentBlock() expectedCurrentBlock = chain[0] - if currentBlock.Hash() != expectedCurrentBlock.Hash() { - t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) - } + require.Equal(expectedCurrentBlock.Hash(), currentBlock.Hash()) lastAcceptedBlock := blockchain.LastConsensusAcceptedBlock() expectedLastAcceptedBlock := blockchain.Genesis() - if lastAcceptedBlock.Hash() != expectedLastAcceptedBlock.Hash() { - t.Fatalf("Expected last accepted block to be %s:%d, but found %s%d", expectedLastAcceptedBlock.Hash().Hex(), expectedLastAcceptedBlock.NumberU64(), lastAcceptedBlock.Hash().Hex(), lastAcceptedBlock.NumberU64()) - } - if err := blockchain.ValidateCanonicalChain(); err != nil { - t.Fatal(err) - } + require.Equal(expectedLastAcceptedBlock.Hash(), lastAcceptedBlock.Hash()) + require.NoError(blockchain.ValidateCanonicalChain()) + // check the state of the last accepted block checkGenesisState := func(sdb *state.StateDB) error { nonce1 := sdb.GetNonce(addr1) @@ -690,19 +589,14 @@ func SetPreferenceRewind(t *testing.T, create createFunc) { } checkBlockChainState(t, blockchain, gspec, chainDB, create, checkGenesisState) - if err := blockchain.Accept(chain[0]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(chain[0])) blockchain.DrainAcceptorQueue() lastAcceptedBlock = blockchain.LastConsensusAcceptedBlock() expectedLastAcceptedBlock = chain[0] - if lastAcceptedBlock.Hash() != expectedLastAcceptedBlock.Hash() { - t.Fatalf("Expected last accepted block to be %s:%d, but found %s%d", expectedLastAcceptedBlock.Hash().Hex(), expectedLastAcceptedBlock.NumberU64(), lastAcceptedBlock.Hash().Hex(), lastAcceptedBlock.NumberU64()) - } - if err := 
blockchain.ValidateCanonicalChain(); err != nil { - t.Fatal(err) - } + require.Equal(expectedLastAcceptedBlock.Hash(), lastAcceptedBlock.Hash()) + require.NoError(blockchain.ValidateCanonicalChain()) + checkUpdatedState := func(sdb *state.StateDB) error { nonce := sdb.GetNonce(addr1) if nonce != 1 { @@ -733,6 +627,7 @@ func SetPreferenceRewind(t *testing.T, create createFunc) { func BuildOnVariousStages(t *testing.T, create createFunc) { var ( + require = require.New(t) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") key3, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") @@ -753,10 +648,8 @@ func BuildOnVariousStages(t *testing.T, create createFunc) { } blockchain, err := create(chainDB, gspec, common.Hash{}, t.TempDir()) - if err != nil { - t.Fatal(err) - } - defer blockchain.Stop() + require.NoError(err) + t.Cleanup(blockchain.Stop) // This call generates a chain of 3 blocks. signer := types.HomesteadSigner{} @@ -770,9 +663,8 @@ func BuildOnVariousStages(t *testing.T, create createFunc) { gen.AddTx(tx) } }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + // Build second chain forked off of the 10th block in [chain1] chain2, _, err := GenerateChain(gspec.Config, chain1[9], blockchain.engine, genDB, 10, 10, func(i int, gen *BlockGen) { // Send all funds back and forth between the two accounts @@ -784,9 +676,8 @@ func BuildOnVariousStages(t *testing.T, create createFunc) { gen.AddTx(tx) } }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + // Build third chain forked off of the 5th block in [chain1]. // The parent of this chain will be accepted before this fork // is inserted. 
@@ -800,62 +691,45 @@ func BuildOnVariousStages(t *testing.T, create createFunc) { gen.AddTx(tx) } }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Insert first 10 blocks from [chain1] - if _, err := blockchain.InsertChain(chain1); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain1) + require.NoError(err) // Accept the first 5 blocks for _, block := range chain1[0:5] { - if err := blockchain.Accept(block); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(block)) } blockchain.DrainAcceptorQueue() // Insert the forked chain [chain2] which starts at the 10th // block in [chain1] ie. a block that is still in processing. - if _, err := blockchain.InsertChain(chain2); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain2) + require.NoError(err) // Insert another forked chain starting at the last accepted // block from [chain1]. - if _, err := blockchain.InsertChain(chain3); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain3) + require.NoError(err) // Accept the next block in [chain1] and then reject all // of the blocks in [chain3], which would then be rejected. - if err := blockchain.Accept(chain1[5]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(chain1[5])) blockchain.DrainAcceptorQueue() for _, block := range chain3 { - if err := blockchain.Reject(block); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Reject(block)) } // Accept the rest of the blocks in [chain1] for _, block := range chain1[6:10] { - if err := blockchain.Accept(block); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(block)) } blockchain.DrainAcceptorQueue() // Accept the first block in [chain2] and reject the // subsequent blocks in [chain1] which would then be rejected. 
- if err := blockchain.Accept(chain2[0]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(chain2[0])) blockchain.DrainAcceptorQueue() for _, block := range chain1[10:] { - if err := blockchain.Reject(block); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Reject(block)) } // check the state of the last accepted block @@ -898,33 +772,27 @@ func BuildOnVariousStages(t *testing.T, create createFunc) { checkBlockChainState(t, blockchain, gspec, chainDB, create, checkState) } -func EmptyBlocks(t *testing.T, create createFunc) { +func EmptyBlocksTest(t *testing.T, create createFunc) { + require := require.New(t) chainDB := rawdb.NewMemoryDatabase() - + // Ensure that key1 has some funds in the genesis block. gspec := &Genesis{ Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: types.GenesisAlloc{}, } blockchain, err := create(chainDB, gspec, common.Hash{}, t.TempDir()) - if err != nil { - t.Fatal(err) - } - defer blockchain.Stop() + require.NoError(err) + t.Cleanup(blockchain.Stop) - _, chain, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 3, 10, func(int, *BlockGen) {}) - if err != nil { - t.Fatal(err) - } + _, chain, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 3, 10, func(_ int, _ *BlockGen) {}) + require.NoError(err) // Insert three blocks into the chain and accept only the first block. 
- if _, err := blockchain.InsertChain(chain); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain) + require.NoError(err) for _, block := range chain { - if err := blockchain.Accept(block); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(block)) } blockchain.DrainAcceptorQueue() @@ -938,6 +806,7 @@ func EmptyBlocks(t *testing.T, create createFunc) { func EmptyAndNonEmptyBlocks(t *testing.T, create createFunc) { var ( + require = require.New(t) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) @@ -953,10 +822,8 @@ func EmptyAndNonEmptyBlocks(t *testing.T, create createFunc) { } blockchain, err := create(chainDB, gspec, common.Hash{}, t.TempDir()) - if err != nil { - t.Fatal(err) - } - defer blockchain.Stop() + require.NoError(err) + t.Cleanup(blockchain.Stop) _, chain, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 5, 10, func(i int, gen *BlockGen) { if i == 3 { @@ -965,17 +832,12 @@ func EmptyAndNonEmptyBlocks(t *testing.T, create createFunc) { gen.AddTx(tx) } }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if _, err := blockchain.InsertChain(chain); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain) + require.NoError(err) for _, block := range chain { - if err := blockchain.Accept(block); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(block)) } blockchain.DrainAcceptorQueue() @@ -1008,6 +870,7 @@ func EmptyAndNonEmptyBlocks(t *testing.T, create createFunc) { func ReorgReInsert(t *testing.T, create createFunc) { var ( + require = require.New(t) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = 
crypto.PubkeyToAddress(key1.PublicKey) @@ -1023,10 +886,8 @@ func ReorgReInsert(t *testing.T, create createFunc) { } blockchain, err := create(chainDB, gspec, common.Hash{}, t.TempDir()) - if err != nil { - t.Fatal(err) - } - defer blockchain.Stop() + require.NoError(err) + t.Cleanup(blockchain.Stop) signer := types.HomesteadSigner{} numBlocks := 3 @@ -1035,41 +896,27 @@ func ReorgReInsert(t *testing.T, create createFunc) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), ethparams.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Insert and accept first block - if err := blockchain.InsertBlock(chain[0]); err != nil { - t.Fatal(err) - } - if err := blockchain.Accept(chain[0]); err != nil { - t.Fatal(err) - } + err = blockchain.InsertBlock(chain[0]) + require.NoError(err) + require.NoError(blockchain.Accept(chain[0])) // Insert block and then set preference back (rewind) to last accepted blck - if err := blockchain.InsertBlock(chain[1]); err != nil { - t.Fatal(err) - } - if err := blockchain.SetPreference(chain[0]); err != nil { - t.Fatal(err) - } + err = blockchain.InsertBlock(chain[1]) + require.NoError(err) + require.NoError(blockchain.SetPreference(chain[0])) // Re-insert and accept block - if err := blockchain.InsertBlock(chain[1]); err != nil { - t.Fatal(err) - } - if err := blockchain.Accept(chain[1]); err != nil { - t.Fatal(err) - } + err = blockchain.InsertBlock(chain[1]) + require.NoError(err) + require.NoError(blockchain.Accept(chain[1])) // Build on top of the re-inserted block and accept - if err := blockchain.InsertBlock(chain[2]); err != nil { - t.Fatal(err) - } - if err := blockchain.Accept(chain[2]); err != nil { - t.Fatal(err) - } + err = blockchain.InsertBlock(chain[2]) + require.NoError(err) + require.NoError(blockchain.Accept(chain[2])) blockchain.DrainAcceptorQueue() // Nothing to assert about the state @@ -1114,6 +961,7 @@ func ReorgReInsert(t 
*testing.T, create createFunc) { //nolint:goimports func AcceptBlockIdenticalStateRoot(t *testing.T, create createFunc) { var ( + require = require.New(t) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) @@ -1129,10 +977,8 @@ func AcceptBlockIdenticalStateRoot(t *testing.T, create createFunc) { } blockchain, err := create(chainDB, gspec, common.Hash{}, t.TempDir()) - if err != nil { - t.Fatal(err) - } - defer blockchain.Stop() + require.NoError(err) + t.Cleanup(blockchain.Stop) signer := types.HomesteadSigner{} _, chain1, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 3, 10, func(i int, gen *BlockGen) { @@ -1143,9 +989,7 @@ func AcceptBlockIdenticalStateRoot(t *testing.T, create createFunc) { } // Allow the third block to be empty. }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) _, chain2, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 2, 10, func(i int, gen *BlockGen) { // Send 1/4 of the funds from addr1 to addr2 in tx1 and 3/4 of the funds in tx2. This will produce the identical state // root in the second block of [chain2] as is present in the second block of [chain1]. 
@@ -1157,63 +1001,43 @@ func AcceptBlockIdenticalStateRoot(t *testing.T, create createFunc) { gen.AddTx(tx) } }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Assert that the block root of the second block in both chains is identical - if chain1[1].Root() != chain2[1].Root() { - t.Fatalf("Expected the latter block in both chain1 and chain2 to have identical state root, but found %s and %s", chain1[1].Root(), chain2[1].Root()) - } + require.Equal(chain1[1].Root(), chain2[1].Root()) // Insert first two blocks of [chain1] and both blocks in [chain2] // This leaves us one additional block to insert on top of [chain1] // after testing that the state roots are handled correctly. - if _, err := blockchain.InsertChain(chain1[:2]); err != nil { - t.Fatal(err) - } - if _, err := blockchain.InsertChain(chain2); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain1[:2]) + require.NoError(err) + _, err = blockchain.InsertChain(chain2) + require.NoError(err) currentBlock := blockchain.CurrentBlock() expectedCurrentBlock := chain1[1] - if currentBlock.Hash() != expectedCurrentBlock.Hash() { - t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) - } + require.Equal(expectedCurrentBlock.Hash(), currentBlock.Hash()) // Accept the first block in [chain1] and reject all of [chain2] - if err := blockchain.Accept(chain1[0]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(chain1[0])) blockchain.DrainAcceptorQueue() for _, block := range chain2 { - if err := blockchain.Reject(block); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Reject(block)) } // Accept the last two blocks in [chain1]. This is a regression test to ensure // that we do not discard a snapshot difflayer that is still in use by a // processing block, when a different block with the same root is rejected. 
- if err := blockchain.Accept(chain1[1]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(chain1[1])) blockchain.DrainAcceptorQueue() lastAcceptedBlock := blockchain.LastConsensusAcceptedBlock() expectedLastAcceptedBlock := chain1[1] - if lastAcceptedBlock.Hash() != expectedLastAcceptedBlock.Hash() { - t.Fatalf("Expected last accepted block to be %s:%d, but found %s%d", expectedLastAcceptedBlock.Hash().Hex(), expectedLastAcceptedBlock.NumberU64(), lastAcceptedBlock.Hash().Hex(), lastAcceptedBlock.NumberU64()) - } + require.Equal(expectedLastAcceptedBlock.Hash(), lastAcceptedBlock.Hash()) - if err := blockchain.InsertBlock(chain1[2]); err != nil { - t.Fatal(err) - } - if err := blockchain.Accept(chain1[2]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.InsertBlock(chain1[2])) + require.NoError(blockchain.Accept(chain1[2])) blockchain.DrainAcceptorQueue() // check the state of the last accepted block @@ -1258,6 +1082,7 @@ func AcceptBlockIdenticalStateRoot(t *testing.T, create createFunc) { //nolint:goimports func ReprocessAcceptBlockIdenticalStateRoot(t *testing.T, create createFunc) { var ( + require = require.New(t) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) @@ -1273,9 +1098,8 @@ func ReprocessAcceptBlockIdenticalStateRoot(t *testing.T, create createFunc) { } blockchain, err := create(chainDB, gspec, common.Hash{}, t.TempDir()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + t.Cleanup(blockchain.Stop) signer := types.HomesteadSigner{} _, chain1, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 3, 10, func(i int, gen *BlockGen) { @@ -1286,9 +1110,7 @@ func ReprocessAcceptBlockIdenticalStateRoot(t *testing.T, create createFunc) { } // Allow the third block to be empty. 
}) - if err != nil { - t.Fatal(err) - } + require.NoError(err) _, chain2, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 2, 10, func(i int, gen *BlockGen) { // Send 1/4 of the funds from addr1 to addr2 in tx1 and 3/4 of the funds in tx2. This will produce the identical state // root in the second block of [chain2] as is present in the second block of [chain1]. @@ -1300,88 +1122,61 @@ func ReprocessAcceptBlockIdenticalStateRoot(t *testing.T, create createFunc) { gen.AddTx(tx) } }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Assert that the block root of the second block in both chains is identical - if chain1[1].Root() != chain2[1].Root() { - t.Fatalf("Expected the latter block in both chain1 and chain2 to have identical state root, but found %s and %s", chain1[1].Root(), chain2[1].Root()) - } + require.Equal(chain1[1].Root(), chain2[1].Root()) // Insert first two blocks of [chain1] and both blocks in [chain2] // This leaves us one additional block to insert on top of [chain1] // after testing that the state roots are handled correctly. 
- if _, err := blockchain.InsertChain(chain1[:2]); err != nil { - t.Fatal(err) - } - if _, err := blockchain.InsertChain(chain2); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain1[:2]) + require.NoError(err) + _, err = blockchain.InsertChain(chain2) + require.NoError(err) currentBlock := blockchain.CurrentBlock() expectedCurrentBlock := chain1[1] - if currentBlock.Hash() != expectedCurrentBlock.Hash() { - t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) - } - + require.Equal(expectedCurrentBlock.Hash(), currentBlock.Hash()) blockchain.Stop() chainDB = rawdb.NewMemoryDatabase() blockchain, err = create(chainDB, gspec, common.Hash{}, t.TempDir()) - if err != nil { - t.Fatal(err) - } - defer blockchain.Stop() + require.NoError(err) + t.Cleanup(blockchain.Stop) // Insert first two blocks of [chain1] and both blocks in [chain2] // This leaves us one additional block to insert on top of [chain1] // after testing that the state roots are handled correctly. 
- if _, err := blockchain.InsertChain(chain1[:2]); err != nil { - t.Fatal(err) - } - if _, err := blockchain.InsertChain(chain2); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain1[:2]) + require.NoError(err) + _, err = blockchain.InsertChain(chain2) + require.NoError(err) currentBlock = blockchain.CurrentBlock() expectedCurrentBlock = chain1[1] - if currentBlock.Hash() != expectedCurrentBlock.Hash() { - t.Fatalf("Expected current block to be %s:%d, but found %s%d", expectedCurrentBlock.Hash().Hex(), expectedCurrentBlock.NumberU64(), currentBlock.Hash().Hex(), currentBlock.Number.Uint64()) - } + require.Equalf(expectedCurrentBlock.Hash(), currentBlock.Hash(), "block hash mismatch for expected height %d, actual height %d", expectedCurrentBlock.NumberU64(), currentBlock.Number.Uint64()) // Accept the first block in [chain1] and reject all of [chain2] - if err := blockchain.Accept(chain1[0]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(chain1[0])) blockchain.DrainAcceptorQueue() for _, block := range chain2 { - if err := blockchain.Reject(block); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Reject(block)) } // Accept the last two blocks in [chain1]. This is a regression test to ensure // that we do not discard a snapshot difflayer that is still in use by a // processing block, when a different block with the same root is rejected. 
- if err := blockchain.Accept(chain1[1]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(chain1[1])) blockchain.DrainAcceptorQueue() lastAcceptedBlock := blockchain.LastConsensusAcceptedBlock() expectedLastAcceptedBlock := chain1[1] - if lastAcceptedBlock.Hash() != expectedLastAcceptedBlock.Hash() { - t.Fatalf("Expected last accepted block to be %s:%d, but found %s%d", expectedLastAcceptedBlock.Hash().Hex(), expectedLastAcceptedBlock.NumberU64(), lastAcceptedBlock.Hash().Hex(), lastAcceptedBlock.NumberU64()) - } + require.Equal(expectedLastAcceptedBlock.Hash(), lastAcceptedBlock.Hash()) - if err := blockchain.InsertBlock(chain1[2]); err != nil { - t.Fatal(err) - } - if err := blockchain.Accept(chain1[2]); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.InsertBlock(chain1[2])) + require.NoError(blockchain.Accept(chain1[2])) blockchain.DrainAcceptorQueue() // check the state of the last accepted block @@ -1770,6 +1565,7 @@ func StatefulPrecompiles(t *testing.T, create createFunc) { func ReexecBlocks(t *testing.T, create ReexecTestFunc) { var ( + require = require.New(t) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) @@ -1785,10 +1581,8 @@ func ReexecBlocks(t *testing.T, create ReexecTestFunc) { } blockchain, err := create(chainDB, gspec, common.Hash{}, t.TempDir(), 4096) - if err != nil { - t.Fatal(err) - } - defer blockchain.Stop() + require.NoError(err) + t.Cleanup(blockchain.Stop) // This call generates a chain of 10 blocks. 
signer := types.HomesteadSigner{} @@ -1796,21 +1590,16 @@ func ReexecBlocks(t *testing.T, create ReexecTestFunc) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), ethparams.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Insert three blocks into the chain and accept only the first block. - if _, err := blockchain.InsertChain(chain); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain) + require.NoError(err) foundTxs := []common.Hash{} missingTxs := []common.Hash{} for i, block := range chain { - if err := blockchain.Accept(block); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(block)) if i == 3 { // At height 3, kill the async accepted block processor to force an @@ -1836,15 +1625,11 @@ func ReexecBlocks(t *testing.T, create ReexecTestFunc) { // async worker shutdown cannot be found. for _, tx := range foundTxs { txLookup, _, _ := blockchain.GetTransactionLookup(tx) - if txLookup == nil { - t.Fatalf("missing transaction: %v", tx) - } + require.NotNilf(txLookup, "missing tx: %v", tx) } for _, tx := range missingTxs { txLookup, _, _ := blockchain.GetTransactionLookup(tx) - if txLookup != nil { - t.Fatalf("transaction should be missing: %v", tx) - } + require.Nilf(txLookup, "transaction should be missing: %v", tx) } // check the state of the last accepted block @@ -1884,22 +1669,21 @@ func ReexecBlocks(t *testing.T, create ReexecTestFunc) { allTxs := slices.Concat(foundTxs, missingTxs) for _, bc := range []*BlockChain{newChain, restartedChain} { // We should confirm that snapshots were properly initialized - if bc.snaps == nil && bc.cacheConfig.SnapshotLimit > 0 { - t.Fatal("snapshot initialization failed") + if bc.cacheConfig.SnapshotLimit > 0 { + require.NotNil(bc.snaps, "snapshot initialization failed") } // We should confirm all transactions can now be queried for _, tx := range allTxs { txLookup, _, _ := 
bc.GetTransactionLookup(tx) - if txLookup == nil { - t.Fatalf("missing transaction: %v", tx) - } + require.NotNilf(txLookup, "missing tx: %v", tx) } } } func ReexecMaxBlocks(t *testing.T, create ReexecTestFunc) { var ( + require = require.New(t) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) @@ -1915,10 +1699,8 @@ func ReexecMaxBlocks(t *testing.T, create ReexecTestFunc) { } blockchain, err := create(chainDB, gspec, common.Hash{}, t.TempDir(), 4096) - if err != nil { - t.Fatal(err) - } - defer blockchain.Stop() + require.NoError(err) + t.Cleanup(blockchain.Stop) // Check that we are generating enough blocks to test the reexec functionality. genNumBlocks := 20 @@ -1930,21 +1712,16 @@ func ReexecMaxBlocks(t *testing.T, create ReexecTestFunc) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), ethparams.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Insert three blocks into the chain and accept only the first block. - if _, err := blockchain.InsertChain(chain); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain) + require.NoError(err) foundTxs := []common.Hash{} missingTxs := []common.Hash{} for i, block := range chain { - if err := blockchain.Accept(block); err != nil { - t.Fatal(err) - } + require.NoError(blockchain.Accept(block)) if i == numAcceptedBlocks { // kill the async accepted block processor to force an @@ -1970,15 +1747,11 @@ func ReexecMaxBlocks(t *testing.T, create ReexecTestFunc) { // async worker shutdown cannot be found. 
for _, tx := range foundTxs { txLookup, _, _ := blockchain.GetTransactionLookup(tx) - if txLookup == nil { - t.Fatalf("missing transaction: %v", tx) - } + require.NotNilf(txLookup, "missing transaction: %v", tx) } for _, tx := range missingTxs { txLookup, _, _ := blockchain.GetTransactionLookup(tx) - if txLookup != nil { - t.Fatalf("transaction should be missing: %v", tx) - } + require.Nilf(txLookup, "transaction should be missing: %v", tx) } // check the state of the last accepted block @@ -2016,16 +1789,14 @@ func ReexecMaxBlocks(t *testing.T, create ReexecTestFunc) { allTxs := slices.Concat(foundTxs, missingTxs) for _, bc := range []*BlockChain{newChain, restartedChain} { // We should confirm that snapshots were properly initialized - if bc.snaps == nil && bc.cacheConfig.SnapshotLimit > 0 { - t.Fatal("snapshot initialization failed") + if bc.cacheConfig.SnapshotLimit > 0 { + require.NotNil(bc.snaps, "snapshot initialization failed") } // We should confirm all transactions can now be queried for _, tx := range allTxs { txLookup, _, _ := bc.GetTransactionLookup(tx) - if txLookup == nil { - t.Fatalf("missing transaction: %v", tx) - } + require.NotNilf(txLookup, "missing transaction: %v", tx) } } } @@ -2048,9 +1819,7 @@ func ReexecCorruptedStateTest(t *testing.T, create ReexecTestFunc) { } blockchain, err := create(chainDB, gspec, common.Hash{}, tempDir, 4096) - if err != nil { - t.Fatalf("failed to create blockchain: %v", err) - } + require.NoError(t, err) // Check that we are generating enough blocks to test the reexec functionality. signer := types.HomesteadSigner{} @@ -2058,18 +1827,14 @@ func ReexecCorruptedStateTest(t *testing.T, create ReexecTestFunc) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), ethparams.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Insert three blocks into the chain and accept only the first block. 
- if _, err := blockchain.InsertChain(chain); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain) + require.NoError(t, err) + // Accept only the first block. - if err := blockchain.Accept(chain[0]); err != nil { - t.Fatal(err) - } + require.NoError(t, blockchain.Accept(chain[0])) // Simulate a crash by updating the acceptor tip blockchain.writeBlockAcceptedIndices(chain[1]) @@ -2077,19 +1842,13 @@ func ReexecCorruptedStateTest(t *testing.T, create ReexecTestFunc) { // Restart blockchain with existing state restartedBlockchain, err := create(chainDB, gspec, chain[1].Hash(), tempDir, 4096) - if err != nil { - t.Fatalf("failed to restart blockchain: %v", err) - } + require.NoError(t, err) defer restartedBlockchain.Stop() // We should be able to accept the remaining blocks for _, block := range chain[2:] { - if err := restartedBlockchain.InsertBlock(block); err != nil { - t.Fatalf("failed to insert block %d: %v", block.NumberU64(), err) - } - if err := restartedBlockchain.Accept(block); err != nil { - t.Fatalf("failed to accept block %d: %v", block.NumberU64(), err) - } + require.NoErrorf(t, restartedBlockchain.InsertBlock(block), "inserting block %d", block.NumberU64()) + require.NoErrorf(t, restartedBlockchain.Accept(block), "accepting block %d", block.NumberU64()) } // check the state of the last accepted block diff --git a/core/extstate/database_test.go b/core/extstate/database_test.go index 1056c0c9fb..c35e9d9ff5 100644 --- a/core/extstate/database_test.go +++ b/core/extstate/database_test.go @@ -352,7 +352,7 @@ func FuzzTree(f *testing.F) { fuzzState.deleteStorage(rand.Intn(len(fuzzState.currentAddrs)), rand.Uint64()) } default: - t.Fatalf("unknown step: %d", step) + require.Failf(t, "unknown step", "got: %d", step) } } }) diff --git a/network/network_test.go b/network/network_test.go index ca73861fd1..680f8d6919 100644 --- a/network/network_test.go +++ b/network/network_test.go @@ -241,9 +241,7 @@ func TestRequestRequestsRoutingAndResponse(t 
*testing.T) { senderWg.Wait() require.Equal(t, totalCalls, int(atomic.LoadUint32(&callNum))) for _, nodeID := range nodes { - if _, exists := contactedNodes[nodeID]; !exists { - t.Fatalf("expected nodeID %s to be contacted but was not", nodeID) - } + require.Contains(t, contactedNodes, nodeID, "node %s was not contacted", nodeID) } // ensure empty nodeID is not allowed @@ -438,9 +436,8 @@ func TestRequestMinVersion(t *testing.T) { require.NoError(t, err) var response TestMessage - if _, err = codecManager.Unmarshal(responseBytes, &response); err != nil { - t.Fatal("unexpected error during unmarshal", err) - } + _, err = codecManager.Unmarshal(responseBytes, &response) + require.NoError(t, err) require.Equal(t, "this is a response", response.Message) } diff --git a/plugin/evm/customheader/dynamic_fee_windower_test.go b/plugin/evm/customheader/dynamic_fee_windower_test.go index c3bc8dc2a7..7386c975ca 100644 --- a/plugin/evm/customheader/dynamic_fee_windower_test.go +++ b/plugin/evm/customheader/dynamic_fee_windower_test.go @@ -6,6 +6,8 @@ package customheader import ( "math/big" "testing" + + "github.com/stretchr/testify/require" ) func TestSelectBigWithinBounds(t *testing.T) { @@ -49,9 +51,7 @@ func TestSelectBigWithinBounds(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { v := selectBigWithinBounds(test.lower, test.value, test.upper) - if v.Cmp(test.expected) != 0 { - t.Fatalf("Expected (%d), found (%d)", test.expected, v) - } + require.Zerof(t, v.Cmp(test.expected), "Expected (%d), found (%d)", test.expected, v) }) } } diff --git a/plugin/evm/customtypes/header_ext_test.go b/plugin/evm/customtypes/header_ext_test.go index a8b8e91e4b..bcaea96f8b 100644 --- a/plugin/evm/customtypes/header_ext_test.go +++ b/plugin/evm/customtypes/header_ext_test.go @@ -175,7 +175,7 @@ func allFieldsSet[T interface { case []uint8, []*Header, Transactions, []*Transaction, Withdrawals, []*Withdrawal: assert.NotEmpty(t, f) default: - t.Errorf("Field %q has 
unsupported type %T", field.Name, f) + assert.Failf(t, "Field %q has unsupported type %T", field.Name, f) } }) } @@ -186,10 +186,7 @@ func assertNonZero[T interface { *big.Int | *common.Hash | *uint64 | *[]uint8 | *Header | *acp226.DelayExcess }](t *testing.T, v T) { t.Helper() - var zero T - if v == zero { - t.Errorf("must not be zero value for %T", v) - } + require.NotZero(t, v) } // Note [TestCopyHeader] tests the [HeaderExtra.PostCopy] method. diff --git a/plugin/evm/gossiper_eth_gossiping_test.go b/plugin/evm/gossiper_eth_gossiping_test.go index 63444938e3..9e7b6e6fcb 100644 --- a/plugin/evm/gossiper_eth_gossiping_test.go +++ b/plugin/evm/gossiper_eth_gossiping_test.go @@ -130,7 +130,7 @@ func attemptAwait(t *testing.T, wg *sync.WaitGroup, delay time.Duration) { select { case <-time.After(delay): - t.Fatal("Timed out waiting for wait group to complete") + require.FailNow(t, "Timed out waiting for wait group to complete") case <-ticker: // The wait group completed without issue } diff --git a/sync/client/client_test.go b/sync/client/client_test.go index 29f8cdd177..85f27c517d 100644 --- a/sync/client/client_test.go +++ b/sync/client/client_test.go @@ -107,9 +107,7 @@ func TestGetCode(t *testing.T) { codeHashes, res, expectedCode := test.setupRequest() responseBytes, err := message.Codec.Marshal(message.Version, res) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Dirty hack required because the client will re-request if it encounters // an error. 
attempted := false @@ -152,9 +150,7 @@ func TestGetBlocks(t *testing.T) { engine := dummy.NewETHFaker() numBlocks := 110 blocks, _, err := core.GenerateChain(params.TestChainConfig, genesis, engine, memdb, numBlocks, 0, func(_ int, _ *core.BlockGen) {}) - if err != nil { - t.Fatal("unexpected error when generating test blockchain", err) - } + require.NoError(t, err) require.Len(t, blocks, numBlocks) // Construct client @@ -175,9 +171,7 @@ func TestGetBlocks(t *testing.T) { blockBytes := make([][]byte, 0, len(blocks)) for i := len(blocks) - 1; i >= 0; i-- { buf := new(bytes.Buffer) - if err := blocks[i].EncodeRLP(buf); err != nil { - t.Fatalf("failed to generate expected response %s", err) - } + require.NoError(t, blocks[i].EncodeRLP(buf)) blockBytes = append(blockBytes, buf.Bytes()) } @@ -197,13 +191,8 @@ func TestGetBlocks(t *testing.T) { }, getResponse: func(t *testing.T, request message.BlockRequest) []byte { response, err := blocksRequestHandler.OnBlockRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) - if err != nil { - t.Fatal(err) - } - - if len(response) == 0 { - t.Fatal("Failed to generate valid response") - } + require.NoError(t, err) + require.NotEmpty(t, response, "Failed to generate valid response") return response }, @@ -220,13 +209,8 @@ func TestGetBlocks(t *testing.T) { getResponse: func(t *testing.T, request message.BlockRequest) []byte { request.Parents -= 5 response, err := blocksRequestHandler.OnBlockRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) - if err != nil { - t.Fatal(err) - } - - if len(response) == 0 { - t.Fatal("Failed to generate valid response") - } + require.NoError(t, err) + require.NotEmpty(t, response) return response }, @@ -254,19 +238,14 @@ func TestGetBlocks(t *testing.T) { }, getResponse: func(t *testing.T, request message.BlockRequest) []byte { response, err := blocksRequestHandler.OnBlockRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) - if err != nil { - 
t.Fatalf("failed to get block response: %s", err) - } + require.NoError(t, err) var blockResponse message.BlockResponse - if _, err = message.Codec.Unmarshal(response, &blockResponse); err != nil { - t.Fatalf("failed to marshal block response: %s", err) - } + _, err = message.Codec.Unmarshal(response, &blockResponse) + require.NoError(t, err) // Replace middle value with garbage data blockResponse.Blocks[10] = []byte("invalid value replacing block bytes") responseBytes, err := message.Codec.Marshal(message.Version, blockResponse) - if err != nil { - t.Fatalf("failed to marshal block response: %s", err) - } + require.NoError(t, err) return responseBytes }, @@ -284,13 +263,8 @@ func TestGetBlocks(t *testing.T) { Height: 99, Parents: 16, }) - if err != nil { - t.Fatal(err) - } - - if len(response) == 0 { - t.Fatal("Failed to generate valid response") - } + require.NoError(t, err) + require.NotEmpty(t, response) return response }, @@ -313,9 +287,7 @@ func TestGetBlocks(t *testing.T) { Blocks: blockBytes, } responseBytes, err := message.Codec.Marshal(message.Version, blockResponse) - if err != nil { - t.Fatalf("failed to marshal block response: %s", err) - } + require.NoError(t, err) return responseBytes }, @@ -332,9 +304,7 @@ func TestGetBlocks(t *testing.T) { Blocks: nil, } responseBytes, err := message.Codec.Marshal(message.Version, blockResponse) - if err != nil { - t.Fatalf("failed to marshal block response: %s", err) - } + require.NoError(t, err) return responseBytes }, @@ -353,9 +323,7 @@ func TestGetBlocks(t *testing.T) { Blocks: blockBytes, } responseBytes, err := message.Codec.Marshal(message.Version, blockResponse) - if err != nil { - t.Fatalf("failed to marshal block response: %s", err) - } + require.NoError(t, err) return responseBytes }, @@ -382,15 +350,10 @@ func TestGetBlocks(t *testing.T) { blockResponse, err := stateSyncClient.GetBlocks(ctx, test.request.Hash, test.request.Height, test.request.Parents) if len(test.expectedErr) != 0 { - if err == nil { 
- t.Fatalf("Expected error: %s, but found no error", test.expectedErr) - } require.ErrorContains(t, err, test.expectedErr) return } - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) test.assertResponse(t, blockResponse) }) @@ -442,12 +405,8 @@ func TestGetLeafs(t *testing.T) { }, getResponse: func(t *testing.T, request message.LeafsRequest) []byte { response, err := handler.OnLeafsRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) - if err != nil { - t.Fatal("unexpected error in calling leafs request handler", err) - } - if len(response) == 0 { - t.Fatal("Failed to create valid response") - } + require.NoError(t, err) + require.NotEmpty(t, response) return response }, @@ -468,12 +427,8 @@ func TestGetLeafs(t *testing.T) { modifiedRequest := request modifiedRequest.Limit = leafsLimit response, err := handler.OnLeafsRequest(context.Background(), ids.GenerateTestNodeID(), 1, modifiedRequest) - if err != nil { - t.Fatal("unexpected error in calling leafs request handler", err) - } - if len(response) == 0 { - t.Fatal("Failed to create valid response") - } + require.NoError(t, err) + require.NotEmpty(t, response) return response }, @@ -488,12 +443,8 @@ func TestGetLeafs(t *testing.T) { }, getResponse: func(t *testing.T, request message.LeafsRequest) []byte { response, err := handler.OnLeafsRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) - if err != nil { - t.Fatal("unexpected error in calling leafs request handler", err) - } - if len(response) == 0 { - t.Fatal("Failed to create valid response") - } + require.NoError(t, err) + require.NotEmpty(t, response) return response }, @@ -512,13 +463,9 @@ func TestGetLeafs(t *testing.T) { }, getResponse: func(t *testing.T, request message.LeafsRequest) []byte { response, err := handler.OnLeafsRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) - if err != nil { - t.Fatal("unexpected error in calling leafs request handler", err) - } + require.NoError(t, err) + 
require.NotEmpty(t, response) - if len(response) == 0 { - t.Fatal("Failed to create valid response") - } return response }, requireResponse: func(t *testing.T, response message.LeafsResponse) { @@ -536,12 +483,9 @@ func TestGetLeafs(t *testing.T) { }, getResponse: func(t *testing.T, request message.LeafsRequest) []byte { response, err := handler.OnLeafsRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) - if err != nil { - t.Fatal("unexpected error in calling leafs request handler", err) - } - if len(response) == 0 { - t.Fatal("Failed to create valid response") - } + require.NoError(t, err) + require.NotEmpty(t, response) + return response }, requireResponse: func(t *testing.T, response message.LeafsResponse) { @@ -559,12 +503,8 @@ func TestGetLeafs(t *testing.T) { }, getResponse: func(t *testing.T, request message.LeafsRequest) []byte { response, err := handler.OnLeafsRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) - if err != nil { - t.Fatal("unexpected error in calling leafs request handler", err) - } - if len(response) == 0 { - t.Fatal("Failed to create valid response") - } + require.NoError(t, err) + require.NotEmpty(t, response) return response }, @@ -583,23 +523,17 @@ func TestGetLeafs(t *testing.T) { }, getResponse: func(t *testing.T, request message.LeafsRequest) []byte { response, err := handler.OnLeafsRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) - if err != nil { - t.Fatal("unexpected error in calling leafs request handler", err) - } - if len(response) == 0 { - t.Fatal("Failed to create valid response") - } + require.NoError(t, err) + require.NotEmpty(t, response) + var leafResponse message.LeafsResponse - if _, err := message.Codec.Unmarshal(response, &leafResponse); err != nil { - t.Fatal(err) - } + _, err = message.Codec.Unmarshal(response, &leafResponse) + require.NoError(t, err) leafResponse.Keys = leafResponse.Keys[1:] leafResponse.Vals = leafResponse.Vals[1:] modifiedResponse, err := 
message.Codec.Marshal(message.Version, leafResponse) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) return modifiedResponse }, expectedErr: errInvalidRangeProof, @@ -613,22 +547,15 @@ func TestGetLeafs(t *testing.T) { }, getResponse: func(t *testing.T, request message.LeafsRequest) []byte { response, err := handler.OnLeafsRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) - if err != nil { - t.Fatal("unexpected error in calling leafs request handler", err) - } - if len(response) == 0 { - t.Fatal("Failed to create valid response") - } + require.NoError(t, err) + require.NotEmpty(t, response) var leafResponse message.LeafsResponse - if _, err := message.Codec.Unmarshal(response, &leafResponse); err != nil { - t.Fatal(err) - } + _, err = message.Codec.Unmarshal(response, &leafResponse) + require.NoError(t, err) modifiedRequest := request modifiedRequest.Start = leafResponse.Keys[1] modifiedResponse, err := handler.OnLeafsRequest(context.Background(), ids.GenerateTestNodeID(), 2, modifiedRequest) - if err != nil { - t.Fatal("unexpected error in calling leafs request handler", err) - } + require.NoError(t, err) return modifiedResponse }, expectedErr: errInvalidRangeProof, @@ -642,23 +569,16 @@ func TestGetLeafs(t *testing.T) { }, getResponse: func(t *testing.T, request message.LeafsRequest) []byte { response, err := handler.OnLeafsRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) - if err != nil { - t.Fatal("unexpected error in calling leafs request handler", err) - } - if len(response) == 0 { - t.Fatal("Failed to create valid response") - } + require.NoError(t, err) + require.NotEmpty(t, response) var leafResponse message.LeafsResponse - if _, err := message.Codec.Unmarshal(response, &leafResponse); err != nil { - t.Fatal(err) - } + _, err = message.Codec.Unmarshal(response, &leafResponse) + require.NoError(t, err) leafResponse.Keys = leafResponse.Keys[:len(leafResponse.Keys)-2] leafResponse.Vals = 
leafResponse.Vals[:len(leafResponse.Vals)-2] modifiedResponse, err := message.Codec.Marshal(message.Version, leafResponse) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) return modifiedResponse }, expectedErr: errInvalidRangeProof, @@ -672,24 +592,17 @@ func TestGetLeafs(t *testing.T) { }, getResponse: func(t *testing.T, request message.LeafsRequest) []byte { response, err := handler.OnLeafsRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) - if err != nil { - t.Fatal("unexpected error in calling leafs request handler", err) - } - if len(response) == 0 { - t.Fatal("Failed to create valid response") - } + require.NoError(t, err) + require.NotEmpty(t, response) var leafResponse message.LeafsResponse - if _, err := message.Codec.Unmarshal(response, &leafResponse); err != nil { - t.Fatal(err) - } + _, err = message.Codec.Unmarshal(response, &leafResponse) + require.NoError(t, err) // Remove middle key-value pair response leafResponse.Keys = append(leafResponse.Keys[:100], leafResponse.Keys[101:]...) leafResponse.Vals = append(leafResponse.Vals[:100], leafResponse.Vals[101:]...) 
modifiedResponse, err := message.Codec.Marshal(message.Version, leafResponse) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) return modifiedResponse }, expectedErr: errInvalidRangeProof, @@ -703,23 +616,16 @@ func TestGetLeafs(t *testing.T) { }, getResponse: func(t *testing.T, request message.LeafsRequest) []byte { response, err := handler.OnLeafsRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) - if err != nil { - t.Fatal("unexpected error in calling leafs request handler", err) - } - if len(response) == 0 { - t.Fatal("Failed to create valid response") - } + require.NoError(t, err) + require.NotEmpty(t, response) var leafResponse message.LeafsResponse - if _, err := message.Codec.Unmarshal(response, &leafResponse); err != nil { - t.Fatal(err) - } + _, err = message.Codec.Unmarshal(response, &leafResponse) + require.NoError(t, err) // Remove middle key-value pair response leafResponse.Vals[100] = []byte("garbage value data") modifiedResponse, err := message.Codec.Marshal(message.Version, leafResponse) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) return modifiedResponse }, expectedErr: errInvalidRangeProof, @@ -733,24 +639,17 @@ func TestGetLeafs(t *testing.T) { }, getResponse: func(t *testing.T, request message.LeafsRequest) []byte { response, err := handler.OnLeafsRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) - if err != nil { - t.Fatal("unexpected error in calling leafs request handler", err) - } - if len(response) == 0 { - t.Fatal("Failed to create valid response") - } + require.NoError(t, err) + require.NotEmpty(t, response) var leafResponse message.LeafsResponse - if _, err := message.Codec.Unmarshal(response, &leafResponse); err != nil { - t.Fatal(err) - } + _, err = message.Codec.Unmarshal(response, &leafResponse) + require.NoError(t, err) // Remove the proof leafResponse.ProofVals = nil modifiedResponse, err := message.Codec.Marshal(message.Version, leafResponse) - if err != nil { - 
t.Fatal(err) - } + require.NoError(t, err) return modifiedResponse }, expectedErr: errInvalidRangeProof, @@ -767,9 +666,7 @@ func TestGetLeafs(t *testing.T) { } leafsResponse, ok := response.(message.LeafsResponse) - if !ok { - t.Fatalf("parseLeafsResponse returned incorrect type %T", response) - } + require.True(t, ok, "expected leafs response") test.requireResponse(t, leafsResponse) }) } @@ -806,9 +703,7 @@ func TestGetLeafsRetries(t *testing.T) { mockNetClient.mockResponse(1, nil, goodResponse) res, err := client.GetLeafs(ctx, request) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) require.Len(t, res.Keys, 1024) require.Len(t, res.Vals, 1024) @@ -817,9 +712,7 @@ func TestGetLeafsRetries(t *testing.T) { mockNetClient.mockResponses(nil, invalidResponse, invalidResponse, goodResponse) res, err = client.GetLeafs(ctx, request) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) require.Len(t, res.Keys, 1024) require.Len(t, res.Vals, 1024) diff --git a/sync/handlers/block_request_test.go b/sync/handlers/block_request_test.go index 1083168c97..78c2fcd2d5 100644 --- a/sync/handlers/block_request_test.go +++ b/sync/handlers/block_request_test.go @@ -18,6 +18,7 @@ import ( "github.com/ava-labs/libevm/rlp" "github.com/ava-labs/libevm/triedb" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/ava-labs/subnet-evm/consensus/dummy" "github.com/ava-labs/subnet-evm/core" @@ -78,9 +79,7 @@ func executeBlockRequestTest(t testing.TB, test blockRequestTest, blocks []*type blockRequest.Parents = test.requestedParents responseBytes, err := blockRequestHandler.OnBlockRequest(context.Background(), ids.GenerateTestNodeID(), 1, blockRequest) - if err != nil { - t.Fatal("unexpected error during block request", err) - } + require.NoError(t, err) if test.assertResponse != nil { test.assertResponse(t, mockHandlerStats, responseBytes) } @@ -93,16 +92,13 @@ func executeBlockRequestTest(t testing.TB, test blockRequestTest, blocks 
[]*type assert.NotEmpty(t, responseBytes) var response message.BlockResponse - if _, err = message.Codec.Unmarshal(responseBytes, &response); err != nil { - t.Fatal("error unmarshalling", err) - } + _, err = message.Codec.Unmarshal(responseBytes, &response) + require.NoError(t, err) assert.Len(t, response.Blocks, test.expectedBlocks) for _, blockBytes := range response.Blocks { block := new(types.Block) - if err := rlp.DecodeBytes(blockBytes, block); err != nil { - t.Fatal("could not parse block", err) - } + require.NoError(t, rlp.DecodeBytes(blockBytes, block)) assert.GreaterOrEqual(t, test.startBlockIndex, 0) assert.Equal(t, blocks[test.startBlockIndex].Hash(), block.Hash()) test.startBlockIndex-- @@ -118,10 +114,8 @@ func TestBlockRequestHandler(t *testing.T) { tdb := triedb.NewDatabase(memdb, nil) genesis := gspec.MustCommit(memdb, tdb) engine := dummy.NewETHFaker() - blocks, _, err := core.GenerateChain(params.TestChainConfig, genesis, engine, memdb, 96, 0, func(int, *core.BlockGen) {}) - if err != nil { - t.Fatal("unexpected error when generating test blockchain", err) - } + blocks, _, err := core.GenerateChain(params.TestChainConfig, genesis, engine, memdb, 96, 0, func(_ int, _ *core.BlockGen) {}) + require.NoError(t, err) assert.Len(t, blocks, 96) tests := []blockRequestTest{ @@ -185,14 +179,10 @@ func TestBlockRequestHandlerLargeBlocks(t *testing.T) { data = make([]byte, units.MiB/16) } tx, err := types.SignTx(types.NewTransaction(b.TxNonce(addr1), addr1, big.NewInt(10000), 4_215_304, nil, data), signer, key1) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) b.AddTx(tx) }) - if err != nil { - t.Fatal("unexpected error when generating test blockchain", err) - } + require.NoError(t, err) assert.Len(t, blocks, 96) tests := []blockRequestTest{ @@ -230,10 +220,8 @@ func TestBlockRequestHandlerCtxExpires(t *testing.T) { tdb := triedb.NewDatabase(memdb, nil) genesis := gspec.MustCommit(memdb, tdb) engine := dummy.NewETHFaker() - blocks, _, err := 
core.GenerateChain(params.TestChainConfig, genesis, engine, memdb, 11, 0, func(int, *core.BlockGen) {}) - if err != nil { - t.Fatal("unexpected error when generating test blockchain", err) - } + blocks, _, err := core.GenerateChain(params.TestChainConfig, genesis, engine, memdb, 11, 0, func(_ int, _ *core.BlockGen) {}) + require.NoError(t, err) assert.Len(t, blocks, 11) @@ -268,23 +256,18 @@ func TestBlockRequestHandlerCtxExpires(t *testing.T) { Height: blocks[10].NumberU64(), Parents: uint16(8), }) - if err != nil { - t.Fatal("unexpected error from BlockRequestHandler", err) - } + require.NoError(t, err) assert.NotEmpty(t, responseBytes) var response message.BlockResponse - if _, err = message.Codec.Unmarshal(responseBytes, &response); err != nil { - t.Fatal("error unmarshalling", err) - } + _, err = message.Codec.Unmarshal(responseBytes, &response) + require.NoError(t, err) // requested 8 blocks, received cancelAfterNumRequests because of timeout assert.Len(t, response.Blocks, cancelAfterNumRequests) for i, blockBytes := range response.Blocks { block := new(types.Block) - if err := rlp.DecodeBytes(blockBytes, block); err != nil { - t.Fatal("could not parse block", err) - } + require.NoError(t, rlp.DecodeBytes(blockBytes, block)) assert.Equal(t, blocks[len(blocks)-i-1].Hash(), block.Hash()) } } diff --git a/sync/handlers/code_request_test.go b/sync/handlers/code_request_test.go index 82b3af4075..f105c39446 100644 --- a/sync/handlers/code_request_test.go +++ b/sync/handlers/code_request_test.go @@ -101,12 +101,9 @@ func TestCodeRequestHandler(t *testing.T) { return } var response message.CodeResponse - if _, err = message.Codec.Unmarshal(responseBytes, &response); err != nil { - t.Fatal("error unmarshalling CodeResponse", err) - } - if len(expectedResponse) != len(response.Data) { - t.Fatalf("Unexpected length of code data expected %d != %d", len(expectedResponse), len(response.Data)) - } + _, err = message.Codec.Unmarshal(responseBytes, &response) + 
require.NoError(t, err) + require.Len(t, response.Data, len(expectedResponse)) for i, code := range expectedResponse { require.Equal(t, code, response.Data[i], "code bytes mismatch at index %d", i) } diff --git a/sync/handlers/leafs_request_test.go b/sync/handlers/leafs_request_test.go index 6772a2a356..9779d3b0ef 100644 --- a/sync/handlers/leafs_request_test.go +++ b/sync/handlers/leafs_request_test.go @@ -33,9 +33,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { corruptedTrieRoot, _, _ := statesynctest.GenerateTrie(t, r, trieDB, 100, common.HashLength) tr, err := trie.New(trie.TrieID(corruptedTrieRoot), trieDB) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Corrupt [corruptedTrieRoot] statesynctest.CorruptTrie(t, memdb, tr, 5) @@ -422,9 +420,8 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { require.NoError(t, err) var leafsResponse message.LeafsResponse - if _, err := message.Codec.Unmarshal(response, &leafsResponse); err != nil { - t.Fatalf("unexpected error when unmarshalling LeafsResponse: %v", err) - } + _, err = message.Codec.Unmarshal(response, &leafsResponse) + require.NoError(t, err) require.Len(t, leafsResponse.Keys, 500) require.Len(t, leafsResponse.Vals, 500) @@ -437,9 +434,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { "account data served from snapshot": { prepareTestFn: func() (context.Context, message.LeafsRequest) { snap, err := snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) snapshotProvider.Snapshot = snap return context.Background(), message.LeafsRequest{ Root: accountTrieRoot, @@ -463,9 +458,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { "partial account data served from snapshot": { prepareTestFn: func() (context.Context, message.LeafsRequest) { snap, err := snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) - if err != nil { - t.Fatal(err) - } + 
require.NoError(t, err) snapshotProvider.Snapshot = snap it := snap.DiskAccountIterator(common.Hash{}) defer it.Release() @@ -478,9 +471,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { // modify one entry of 1 in 4 segments if i%(segmentLen*4) == 0 { acc, err := types.FullAccount(it.Account()) - if err != nil { - t.Fatalf("could not parse snapshot account: %v", err) - } + require.NoError(t, err) acc.Nonce++ bytes := types.SlimAccountRLP(*acc) rawdb.WriteAccountSnapshot(memdb, it.Hash(), bytes) @@ -515,9 +506,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { "storage data served from snapshot": { prepareTestFn: func() (context.Context, message.LeafsRequest) { snap, err := snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) snapshotProvider.Snapshot = snap return context.Background(), message.LeafsRequest{ Root: largeTrieRoot, @@ -542,9 +531,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { "partial storage data served from snapshot": { prepareTestFn: func() (context.Context, message.LeafsRequest) { snap, err := snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) snapshotProvider.Snapshot = snap it := snap.DiskStorageIterator(largeStorageAccount, common.Hash{}) defer it.Release() @@ -592,9 +579,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { "last snapshot key removed": { prepareTestFn: func() (context.Context, message.LeafsRequest) { snap, err := snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) snapshotProvider.Snapshot = snap it := snap.DiskStorageIterator(smallStorageAccount, common.Hash{}) defer it.Release() @@ -627,9 +612,7 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { "request last key when removed from snapshot": { prepareTestFn: 
func() (context.Context, message.LeafsRequest) { snap, err := snapshot.New(snapConfig, memdb, trieDB, common.Hash{}, accountTrieRoot) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) snapshotProvider.Snapshot = snap it := snap.DiskStorageIterator(smallStorageAccount, common.Hash{}) defer it.Release() @@ -692,9 +675,7 @@ func requireRangeProofIsValid(t *testing.T, request *message.LeafsRequest, respo defer proof.Close() for _, proofVal := range response.ProofVals { proofKey := crypto.Keccak256(proofVal) - if err := proof.Put(proofKey, proofVal); err != nil { - t.Fatal(err) - } + require.NoError(t, proof.Put(proofKey, proofVal)) } } diff --git a/sync/statesync/statesynctest/test_trie.go b/sync/statesync/statesynctest/test_trie.go index 68557d05a0..c2f180f581 100644 --- a/sync/statesync/statesynctest/test_trie.go +++ b/sync/statesync/statesynctest/test_trie.go @@ -25,10 +25,9 @@ import ( // GenerateTrie creates a trie with [numKeys] random key-value pairs inside of [trieDB]. // Returns the root of the generated trie, the slice of keys inserted into the trie in lexicographical // order, and the slice of corresponding values. 
+// GenerateTrie reads from [rand] and the caller should call rand.Seed(n) for deterministic results func GenerateTrie(t *testing.T, r *rand.Rand, trieDB *triedb.Database, numKeys int, keySize int) (common.Hash, [][]byte, [][]byte) { - if keySize < wrappers.LongLen+1 { - t.Fatal("key size must be at least 9 bytes (8 bytes for uint64 and 1 random byte)") - } + require.GreaterOrEqual(t, keySize, wrappers.LongLen+1, "key size must be at least 9 bytes (8 bytes for uint64 and 1 random byte)") return FillTrie(t, r, 0, numKeys, keySize, trieDB, types.EmptyRootHash) } @@ -36,9 +35,7 @@ func GenerateTrie(t *testing.T, r *rand.Rand, trieDB *triedb.Database, numKeys i // returns inserted keys and values func FillTrie(t *testing.T, r *rand.Rand, start, numKeys int, keySize int, trieDB *triedb.Database, root common.Hash) (common.Hash, [][]byte, [][]byte) { testTrie, err := trie.New(trie.TrieID(root), trieDB) - if err != nil { - t.Fatalf("error creating trie: %v", err) - } + require.NoError(t, err) keys := make([][]byte, 0, numKeys) values := make([][]byte, 0, numKeys) @@ -73,33 +70,24 @@ func FillTrie(t *testing.T, r *rand.Rand, start, numKeys int, keySize int, trieD // non-empty trie at [root]. 
(all key/value pairs must be equal) func AssertTrieConsistency(t testing.TB, root common.Hash, a, b *triedb.Database, onLeaf func(key, val []byte) error) { trieA, err := trie.New(trie.TrieID(root), a) - if err != nil { - t.Fatalf("error creating trieA, root=%s, err=%v", root, err) - } + require.NoError(t, err) trieB, err := trie.New(trie.TrieID(root), b) - if err != nil { - t.Fatalf("error creating trieB, root=%s, err=%v", root, err) - } + require.NoError(t, err) nodeItA, err := trieA.NodeIterator(nil) - if err != nil { - t.Fatalf("error creating node iterator for trieA, root=%s, err=%v", root, err) - } + require.NoError(t, err) nodeItB, err := trieB.NodeIterator(nil) - if err != nil { - t.Fatalf("error creating node iterator for trieB, root=%s, err=%v", root, err) - } + require.NoError(t, err) itA := trie.NewIterator(nodeItA) itB := trie.NewIterator(nodeItB) + count := 0 for itA.Next() && itB.Next() { count++ require.Equal(t, itA.Key, itB.Key) require.Equal(t, itA.Value, itB.Value) if onLeaf != nil { - if err := onLeaf(itA.Key, itA.Value); err != nil { - t.Fatalf("error in onLeaf callback: %v", err) - } + require.NoError(t, onLeaf(itA.Key, itA.Value)) } } require.NoError(t, itA.Err) @@ -115,25 +103,16 @@ func CorruptTrie(t *testing.T, diskdb ethdb.Batcher, tr *trie.Trie, n int) { // Delete some trie nodes batch := diskdb.NewBatch() nodeIt, err := tr.NodeIterator(nil) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) count := 0 for nodeIt.Next(true) { count++ if count%n == 0 && nodeIt.Hash() != (common.Hash{}) { - if err := batch.Delete(nodeIt.Hash().Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(t, batch.Delete(nodeIt.Hash().Bytes())) } } - if err := nodeIt.Error(); err != nil { - t.Fatal(err) - } - - if err := batch.Write(); err != nil { - t.Fatal(err) - } + require.NoError(t, nodeIt.Error()) + require.NoError(t, batch.Write()) } // FillAccounts adds [numAccounts] randomly generated accounts to the secure trie at [root] and commits it to 
[trieDB]. @@ -151,9 +130,7 @@ func FillAccounts( ) tr, err := trie.NewStateTrie(trie.TrieID(root), trieDB) - if err != nil { - t.Fatalf("error opening trie: %v", err) - } + require.NoError(t, err) for i := 0; i < numAccounts; i++ { acc := types.StateAccount{ @@ -167,9 +144,7 @@ func FillAccounts( } accBytes, err := rlp.EncodeToBytes(&acc) - if err != nil { - t.Fatalf("failed to rlp encode account: %v", err) - } + require.NoError(t, err) key := utilstest.NewKey(t) tr.MustUpdate(key.Address[:], accBytes) @@ -177,14 +152,8 @@ func FillAccounts( } newRoot, nodes, err := tr.Commit(false) - if err != nil { - t.Fatalf("error committing trie: %v", err) - } - if err := trieDB.Update(newRoot, root, 0, trienode.NewWithNodeSet(nodes), nil); err != nil { - t.Fatalf("error updating trieDB: %v", err) - } - if err := trieDB.Commit(newRoot, false); err != nil { - t.Fatalf("error committing trieDB: %v", err) - } + require.NoError(t, err) + require.NoError(t, trieDB.Update(newRoot, root, 0, trienode.NewWithNodeSet(nodes), nil)) + require.NoError(t, trieDB.Commit(newRoot, false)) return newRoot, accounts } From 1a0ecea35e3cd8e9a77fde90bea565716c16ebdf Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 11:14:41 -0500 Subject: [PATCH 02/46] subnet-evm specific linting --- .../precompilebind/precompile_bind_test.go | 30 +- core/blockchain_ext_test.go | 71 +- core/blockchain_test.go | 73 +- core/state/trie_prefetcher_extra_test.go | 2 +- core/state_processor_ext_test.go | 8 +- eth/tracers/api_extra_test.go | 55 +- params/extras/precompile_upgrade_test.go | 4 +- plugin/evm/customtypes/block_ext_test.go | 10 +- plugin/evm/syncervm_test.go | 66 +- plugin/evm/version_test.go | 13 +- plugin/evm/vm_test.go | 1090 +++++------------ plugin/evm/vm_upgrade_bytes_test.go | 67 +- .../allowlisttest/test_allowlist_config.go | 5 +- sync/statesync/code_syncer_test.go | 5 +- sync/statesync/statesynctest/test_sync.go | 8 +- sync/statesync/sync_test.go | 6 +- 16 files changed, 466 
insertions(+), 1047 deletions(-) diff --git a/accounts/abi/bind/precompilebind/precompile_bind_test.go b/accounts/abi/bind/precompilebind/precompile_bind_test.go index 28a1b9a858..867e2a726f 100644 --- a/accounts/abi/bind/precompilebind/precompile_bind_test.go +++ b/accounts/abi/bind/precompilebind/precompile_bind_test.go @@ -628,9 +628,7 @@ func TestPrecompileBind(t *testing.T) { ws := t.TempDir() pkg := filepath.Join(ws, "precompilebindtest") - if err := os.MkdirAll(pkg, 0o700); err != nil { - t.Fatalf("failed to create package: %v", err) - } + require.NoError(t, os.MkdirAll(pkg, 0o700), "failed to create package") // Generate the test suite for all the contracts for i, tt := range bindTests { t.Run(tt.name, func(t *testing.T) { @@ -643,13 +641,11 @@ func TestPrecompileBind(t *testing.T) { return } if err != nil { - t.Fatalf("test %d: failed to generate binding: %v", i, err) + require.Fail(t, fmt.Sprintf("test %d: failed to generate binding: %v", i, err)) } precompilePath := filepath.Join(pkg, tt.name) - if err := os.MkdirAll(precompilePath, 0o700); err != nil { - t.Fatalf("failed to create package: %v", err) - } + require.NoError(t, os.MkdirAll(precompilePath, 0o700), "failed to create package") for _, file := range bindedFiles { switch file.FileName { case ContractFileName: @@ -663,13 +659,9 @@ func TestPrecompileBind(t *testing.T) { // change address to a suitable one for testing file.Content = strings.Replace(file.Content, `common.HexToAddress("{ASUITABLEHEXADDRESS}")`, `common.HexToAddress("0x03000000000000000000000000000000000000ff")`, 1) } - if err = os.WriteFile(filepath.Join(precompilePath, file.FileName), []byte(file.Content), 0o600); err != nil { - t.Fatalf("test %d: failed to write binding: %v", i, err) - } - } - if err = os.WriteFile(filepath.Join(precompilePath, "contract.abi"), []byte(tt.abi), 0o600); err != nil { - t.Fatalf("test %d: failed to write binding: %v", i, err) + require.NoError(t, os.WriteFile(filepath.Join(precompilePath, 
file.FileName), []byte(file.Content), 0o600), "test %d: failed to write binding", i) } + require.NoError(t, os.WriteFile(filepath.Join(precompilePath, "contract.abi"), []byte(tt.abi), 0o600), "test %d: failed to write binding", i) // Generate the test file with the injected test code code := fmt.Sprintf(` @@ -684,32 +676,30 @@ func TestPrecompileBind(t *testing.T) { %s } `, tt.name, tt.imports, tt.name, tt.tester) - if err := os.WriteFile(filepath.Join(precompilePath, strings.ToLower(tt.name)+"_test.go"), []byte(code), 0o600); err != nil { - t.Fatalf("test %d: failed to write tests: %v", i, err) - } + require.NoError(t, os.WriteFile(filepath.Join(precompilePath, strings.ToLower(tt.name)+"_test.go"), []byte(code), 0o600), "test %d: failed to write tests", i) }) } moder := exec.Command(gocmd, "mod", "init", "precompilebindtest") moder.Dir = pkg if out, err := moder.CombinedOutput(); err != nil { - t.Fatalf("failed to convert binding test to modules: %v\n%s", err, out) + require.Fail(t, fmt.Sprintf("failed to convert binding test to modules: %v\n%s", err, out)) } pwd, _ := os.Getwd() replacer := exec.Command(gocmd, "mod", "edit", "-x", "-require", "github.com/ava-labs/subnet-evm@v0.0.0", "-replace", "github.com/ava-labs/subnet-evm="+filepath.Join(pwd, "..", "..", "..", "..")) // Repo root replacer.Dir = pkg if out, err := replacer.CombinedOutput(); err != nil { - t.Fatalf("failed to replace binding test dependency to current source tree: %v\n%s", err, out) + require.Fail(t, fmt.Sprintf("failed to replace binding test dependency to current source tree: %v\n%s", err, out)) } tidier := exec.Command(gocmd, "mod", "tidy", "-compat=1.24") tidier.Dir = pkg if out, err := tidier.CombinedOutput(); err != nil { - t.Fatalf("failed to tidy Go module file: %v\n%s", err, out) + require.Fail(t, fmt.Sprintf("failed to tidy Go module file: %v\n%s", err, out)) } // Test the entire package and report any failures cmd := exec.Command(gocmd, "test", "./...", "-v", "-count", "1") cmd.Dir = 
pkg if out, err := cmd.CombinedOutput(); err != nil { - t.Fatalf("failed to run binding test: %v\n%s", err, out) + require.Fail(t, fmt.Sprintf("failed to run binding test: %v\n%s", err, out)) } } diff --git a/core/blockchain_ext_test.go b/core/blockchain_ext_test.go index 32351f05af..4651525d4f 100644 --- a/core/blockchain_ext_test.go +++ b/core/blockchain_ext_test.go @@ -6,8 +6,6 @@ package core import ( "fmt" "math/big" - "os" - "path/filepath" "slices" "testing" @@ -141,30 +139,6 @@ func copyMemDB(db ethdb.Database) (ethdb.Database, error) { return newDB, nil } -// This copies all files from a flat directory [src] to a new temporary directory and returns -// the path to the new directory. -func copyFlatDir(t *testing.T, src string) string { - t.Helper() - if src == "" { - return "" - } - - dst := t.TempDir() - ents, err := os.ReadDir(src) - require.NoError(t, err) - - for _, e := range ents { - require.False(t, e.IsDir(), "expected flat directory") - name := e.Name() - data, err := os.ReadFile(filepath.Join(src, name)) - require.NoError(t, err) - info, err := e.Info() - require.NoError(t, err) - require.NoError(t, os.WriteFile(filepath.Join(dst, name), data, info.Mode().Perm())) - } - return dst -} - // checkBlockChainState creates a new BlockChain instance and checks that exporting each block from // genesis to last accepted from the original instance yields the same last accepted block and state // root. 
@@ -1397,9 +1371,7 @@ func StatefulPrecompiles(t *testing.T, create createFunc) { } blockchain, err := create(chainDB, gspec, common.Hash{}, t.TempDir()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer blockchain.Stop() signer := types.LatestSigner(params.TestChainConfig) @@ -1429,9 +1401,7 @@ func StatefulPrecompiles(t *testing.T, create createFunc) { addTx: func(gen *BlockGen) { feeCap := new(big.Int).Add(gen.BaseFee(), tip) input, err := allowlist.PackModifyAllowList(addr2, allowlist.AdminRole) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) tx := types.NewTx(&types.DynamicFeeTx{ ChainID: params.TestChainConfig.ChainID, Nonce: gen.TxNonce(addr1), @@ -1444,9 +1414,7 @@ func StatefulPrecompiles(t *testing.T, create createFunc) { }) signedTx, err := types.SignTx(tx, signer, key1) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) gen.AddTx(signedTx) }, verifyState: func(sdb *state.StateDB) error { @@ -1462,22 +1430,16 @@ func StatefulPrecompiles(t *testing.T, create createFunc) { }, verifyGenesis: func(sdb *state.StateDB) { res := deployerallowlist.GetContractDeployerAllowListStatus(sdb, addr1) - if allowlist.AdminRole != res { - t.Fatalf("unexpected allow list status for addr1 %s, expected %s", res, allowlist.AdminRole) - } + require.Equal(t, allowlist.AdminRole, res, "unexpected allow list status for addr1 %s, expected %s", res, allowlist.AdminRole) res = deployerallowlist.GetContractDeployerAllowListStatus(sdb, addr2) - if allowlist.NoRole != res { - t.Fatalf("unexpected allow list status for addr2 %s, expected %s", res, allowlist.NoRole) - } + require.Equal(t, allowlist.NoRole, res, "unexpected allow list status for addr2 %s, expected %s", res, allowlist.NoRole) }, }, "fee manager set config": { addTx: func(gen *BlockGen) { feeCap := new(big.Int).Add(gen.BaseFee(), tip) input, err := feemanager.PackSetFeeConfig(testFeeConfig) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) tx := 
types.NewTx(&types.DynamicFeeTx{ ChainID: params.TestChainConfig.ChainID, Nonce: gen.TxNonce(addr1), @@ -1490,9 +1452,7 @@ func StatefulPrecompiles(t *testing.T, create createFunc) { }) signedTx, err := types.SignTx(tx, signer, key1) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) gen.AddTx(signedTx) }, verifyState: func(sdb *state.StateDB) error { @@ -1525,23 +1485,16 @@ func StatefulPrecompiles(t *testing.T, create createFunc) { test.addTx(gen) } }) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Insert three blocks into the chain and accept only the first block. - if _, err := blockchain.InsertChain(chain); err != nil { - t.Fatal(err) - } - if err := blockchain.Accept(chain[0]); err != nil { - t.Fatal(err) - } + _, err = blockchain.InsertChain(chain) + require.NoError(t, err) + require.NoError(t, blockchain.Accept(chain[0])) blockchain.DrainAcceptorQueue() genesisState, err := blockchain.StateAt(blockchain.Genesis().Root()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) for _, test := range tests { if test.verifyGenesis == nil { continue diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 12d55c8c81..7b994976cb 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -17,6 +17,7 @@ import ( "github.com/ava-labs/libevm/crypto" "github.com/ava-labs/libevm/eth/tracers/logger" "github.com/ava-labs/libevm/ethdb" + "github.com/stretchr/testify/require" "github.com/ava-labs/subnet-evm/consensus/dummy" "github.com/ava-labs/subnet-evm/core/state/pruner" @@ -354,7 +355,7 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { blockchain, err := createBlockChain(chainDB, pruningConfig, gspec, common.Hash{}) if err != nil { - t.Fatal(err) + require.NoError(t, err) } defer blockchain.Stop() @@ -365,16 +366,14 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { gen.AddTx(tx) }) if err != nil { - t.Fatal(err) + require.NoError(t, err) } if _, err := 
blockchain.InsertChain(chain); err != nil { - t.Fatal(err) + require.NoError(t, err) } for _, block := range chain { - if err := blockchain.Accept(block); err != nil { - t.Fatal(err) - } + require.NoError(t, blockchain.Accept(block)) } blockchain.DrainAcceptorQueue() @@ -383,13 +382,13 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { blockchain, err = createBlockChain(chainDB, pruningConfig, gspec, lastAcceptedHash) if err != nil { - t.Fatal(err) + require.NoError(t, err) } // Confirm that the node does not have the state for intermediate nodes (exclude the last accepted block) for _, block := range chain[:len(chain)-1] { if blockchain.HasState(block.Root()) { - t.Fatalf("Expected blockchain to be missing state for intermediate block %d with pruning enabled", block.NumberU64()) + require.Fail(t, fmt.Sprintf("Expected blockchain to be missing state for intermediate block %d with pruning enabled", block.NumberU64())) } } blockchain.Stop() @@ -413,13 +412,13 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { lastAcceptedHash, ) if err != nil { - t.Fatal(err) + require.NoError(t, err) } defer blockchain.Stop() for _, block := range chain { if !blockchain.HasState(block.Root()) { - t.Fatalf("failed to re-generate state for block %d", block.NumberU64()) + require.Fail(t, fmt.Sprintf("failed to re-generate state for block %d", block.NumberU64())) } } } @@ -536,11 +535,11 @@ func testCanonicalHashMarker(t *testing.T, scheme string) { ) _, forkA, _, err := GenerateChainWithGenesis(gspec, engine, c.forkA, 10, func(int, *BlockGen) {}) if err != nil { - t.Fatal(err) + require.NoError(t, err) } _, forkB, _, err := GenerateChainWithGenesis(gspec, engine, c.forkB, 10, func(int, *BlockGen) {}) if err != nil { - t.Fatal(err) + require.NoError(t, err) } // Initialize test chain @@ -549,25 +548,25 @@ func testCanonicalHashMarker(t *testing.T, scheme string) { cacheConfig.ChainDataDir = t.TempDir() chain, err := NewBlockChain(db, 
cacheConfig, gspec, engine, vm.Config{}, common.Hash{}, false) if err != nil { - t.Fatalf("failed to create tester chain: %v", err) + require.Fail(t, fmt.Sprintf("failed to create tester chain: %v", err)) } // Insert forkA and forkB, the canonical should on forkA still if n, err := chain.InsertChain(forkA); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", n, err) + require.Fail(t, fmt.Sprintf("block %d: failed to insert into chain: %v", n, err)) } if n, err := chain.InsertChain(forkB); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", n, err) + require.Fail(t, fmt.Sprintf("block %d: failed to insert into chain: %v", n, err)) } verify := func(head *types.Block) { if chain.CurrentBlock().Hash() != head.Hash() { - t.Fatalf("Unexpected block hash, want %x, got %x", head.Hash(), chain.CurrentBlock().Hash()) + require.Fail(t, fmt.Sprintf("Unexpected block hash, want %x, got %x", head.Hash(), chain.CurrentBlock().Hash())) } if chain.CurrentHeader().Hash() != head.Hash() { - t.Fatalf("Unexpected head header, want %x, got %x", head.Hash(), chain.CurrentHeader().Hash()) + require.Fail(t, fmt.Sprintf("Unexpected head header, want %x, got %x", head.Hash(), chain.CurrentHeader().Hash())) } if !chain.HasState(head.Root()) { - t.Fatalf("Lost block state %v %x", head.Number(), head.Hash()) + require.Fail(t, fmt.Sprintf("Lost block state %v %x", head.Number(), head.Hash())) } } @@ -576,9 +575,7 @@ func testCanonicalHashMarker(t *testing.T, scheme string) { verify(forkB[len(forkB)-1]) } else { verify(forkA[len(forkA)-1]) - if err := chain.SetPreference(forkB[len(forkB)-1]); err != nil { - t.Fatal(err) - } + require.NoError(t, chain.SetPreference(forkB[len(forkB)-1])) verify(forkB[len(forkB)-1]) } @@ -587,14 +584,14 @@ func testCanonicalHashMarker(t *testing.T, scheme string) { block := forkB[i] hash := chain.GetCanonicalHash(block.NumberU64()) if hash != block.Hash() { - t.Fatalf("Unexpected canonical hash %d", block.NumberU64()) + 
require.Fail(t, fmt.Sprintf("Unexpected canonical hash %d", block.NumberU64())) } } if c.forkA > c.forkB { for i := uint64(c.forkB) + 1; i <= uint64(c.forkA); i++ { hash := chain.GetCanonicalHash(i) if hash != (common.Hash{}) { - t.Fatalf("Unexpected canonical hash %d", i) + require.Fail(t, fmt.Sprintf("Unexpected canonical hash %d", i)) } } } @@ -737,13 +734,13 @@ func testCreateThenDelete(t *testing.T, config *params.ChainConfig) { // Tracer: logger.NewJSONLogger(nil, os.Stdout), }, common.Hash{}, false) if err != nil { - t.Fatalf("failed to create tester chain: %v", err) + require.Fail(t, fmt.Sprintf("failed to create tester chain: %v", err)) } defer chain.Stop() // Import the blocks for _, block := range blocks { if _, err := chain.InsertChain([]*types.Block{block}); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err) + require.Fail(t, fmt.Sprintf("block %d: failed to insert into chain: %v", block.NumberU64(), err)) } } } @@ -847,17 +844,17 @@ func TestDeleteThenCreate(t *testing.T) { } }) if err != nil { - t.Fatal(err) + require.NoError(t, err) } // Import the canonical chain chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfig, gspec, engine, vm.Config{}, common.Hash{}, false) if err != nil { - t.Fatalf("failed to create tester chain: %v", err) + require.Fail(t, fmt.Sprintf("failed to create tester chain: %v", err)) } defer chain.Stop() for _, block := range blocks { if _, err := chain.InsertChain([]*types.Block{block}); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err) + require.Fail(t, fmt.Sprintf("block %d: failed to insert into chain: %v", block.NumberU64(), err)) } } } @@ -939,23 +936,21 @@ func TestTransientStorageReset(t *testing.T) { // Initialize the blockchain with 1153 enabled. 
chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfig, gspec, engine, vmConfig, common.Hash{}, false) if err != nil { - t.Fatalf("failed to create tester chain: %v", err) + require.Fail(t, fmt.Sprintf("failed to create tester chain: %v", err)) } defer chain.Stop() // Import the blocks if _, err := chain.InsertChain(blocks); err != nil { - t.Fatalf("failed to insert into chain: %v", err) + require.Fail(t, fmt.Sprintf("failed to insert into chain: %v", err)) } // Check the storage state, err := chain.StateAt(chain.CurrentHeader().Root) if err != nil { - t.Fatalf("Failed to load state %v", err) + require.Fail(t, fmt.Sprintf("Failed to load state %v", err)) } loc := common.BytesToHash([]byte{1}) slot := state.GetState(destAddress, loc) - if slot != (common.Hash{}) { - t.Fatalf("Unexpected dirty storage slot") - } + require.Equal(t, common.Hash{}, slot, "Unexpected dirty storage slot") } func TestEIP3651(t *testing.T) { @@ -1029,11 +1024,11 @@ func TestEIP3651(t *testing.T) { }) chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfig, gspec, engine, vm.Config{Tracer: logger.NewMarkdownLogger(&logger.Config{}, os.Stderr)}, common.Hash{}, false) if err != nil { - t.Fatalf("failed to create tester chain: %v", err) + require.Fail(t, fmt.Sprintf("failed to create tester chain: %v", err)) } defer chain.Stop() if n, err := chain.InsertChain(blocks); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", n, err) + require.Fail(t, fmt.Sprintf("block %d: failed to insert into chain: %v", n, err)) } block := chain.GetBlockByNumber(1) @@ -1042,7 +1037,7 @@ func TestEIP3651(t *testing.T) { innerGas := vm.GasQuickStep*2 + ethparams.ColdSloadCostEIP2929*2 expectedGas := ethparams.TxGas + 5*vm.GasFastestStep + vm.GasQuickStep + 100 + innerGas // 100 because 0xaaaa is in access list if block.GasUsed() != expectedGas { - t.Fatalf("incorrect amount of gas spent: expected %d, got %d", expectedGas, block.GasUsed()) + require.Fail(t, 
fmt.Sprintf("incorrect amount of gas spent: expected %d, got %d", expectedGas, block.GasUsed())) } state, _ := chain.State() @@ -1056,7 +1051,7 @@ func TestEIP3651(t *testing.T) { gasPrice := new(big.Int).Add(block.BaseFee(), tx.EffectiveGasTipValue(block.BaseFee())) expected := new(big.Int).SetUint64(block.GasUsed() * gasPrice.Uint64()) if actual.Cmp(expected) != 0 { - t.Fatalf("miner balance incorrect: expected %d, got %d", expected, actual) + require.Fail(t, fmt.Sprintf("miner balance incorrect: expected %d, got %d", expected, actual)) } // 4: Ensure the tx sender paid for the gasUsed * (block baseFee + effectiveGasTip). @@ -1064,6 +1059,6 @@ func TestEIP3651(t *testing.T) { // as our handling of the coinbase payment is different. actual = new(big.Int).Sub(funds, state.GetBalance(addr1).ToBig()) if actual.Cmp(expected) != 0 { - t.Fatalf("sender balance incorrect: expected %d, got %d", expected, actual) + require.Fail(t, fmt.Sprintf("sender balance incorrect: expected %d, got %d", expected, actual)) } } diff --git a/core/state/trie_prefetcher_extra_test.go b/core/state/trie_prefetcher_extra_test.go index fde2e77f6b..632aec8420 100644 --- a/core/state/trie_prefetcher_extra_test.go +++ b/core/state/trie_prefetcher_extra_test.go @@ -126,7 +126,7 @@ func BenchmarkPrefetcherDatabase(b *testing.B) { if previous != root { require.NoError(db.TrieDB().Dereference(previous)) } else { - b.Fatal("root did not change") + b.Fail() } } require.NoError(levelDB.Close()) diff --git a/core/state_processor_ext_test.go b/core/state_processor_ext_test.go index 5e40a1fa47..632a3313a7 100644 --- a/core/state_processor_ext_test.go +++ b/core/state_processor_ext_test.go @@ -4,6 +4,7 @@ package core import ( + "fmt" "math/big" "testing" @@ -12,6 +13,7 @@ import ( "github.com/ava-labs/libevm/core/types" "github.com/ava-labs/libevm/core/vm" "github.com/ava-labs/libevm/crypto" + "github.com/stretchr/testify/require" "github.com/ava-labs/subnet-evm/consensus/dummy" 
"github.com/ava-labs/subnet-evm/params" @@ -98,11 +100,9 @@ func TestBadTxAllowListBlock(t *testing.T) { } { block := GenerateBadBlock(gspec.ToBlock(), dummy.NewCoinbaseFaker(), tt.txs, gspec.Config) _, err := blockchain.InsertChain(types.Blocks{block}) - if err == nil { - t.Fatal("block imported without errors") - } + require.Error(t, err, "block imported without errors") if have, want := err.Error(), tt.want; have != want { - t.Errorf("test %d:\nhave \"%v\"\nwant \"%v\"\n", i, have, want) + require.Fail(t, fmt.Sprintf("test %d:\nhave \"%v\"\nwant \"%v\"\n", i, have, want)) } } } diff --git a/eth/tracers/api_extra_test.go b/eth/tracers/api_extra_test.go index 16f76850bd..be56497e81 100644 --- a/eth/tracers/api_extra_test.go +++ b/eth/tracers/api_extra_test.go @@ -135,23 +135,16 @@ func testTraceBlockPrecompileActivation(t *testing.T, scheme string) { result, err := api.TraceBlockByNumber(context.Background(), tc.blockNumber, tc.config) if tc.expectErr != nil { if err == nil { - t.Errorf("test %d, want error %v", i, tc.expectErr) + require.Fail(t, fmt.Sprintf("test %d, want error %v", i, tc.expectErr)) continue } - if !reflect.DeepEqual(err, tc.expectErr) { - t.Errorf("test %d: error mismatch, want %v, get %v", i, tc.expectErr, err) - } - continue - } - if err != nil { - t.Errorf("test %d, want no error, have %v", i, err) + require.Equal(t, tc.expectErr, err, "test %d: error mismatch", i) continue } + require.NoError(t, err, "test %d, want no error", i) have, _ := json.Marshal(result) want := tc.want - if string(have) != want { - t.Errorf("test %d, result mismatch, have\n%v\n, want\n%v\n", i, string(have), want) - } + require.Equal(t, want, string(have), "test %d, result mismatch", i) } } @@ -306,28 +299,19 @@ func testTraceChainPrecompileActivation(t *testing.T, scheme string) { next := c.start + 1 for result := range resCh { - if have, want := uint64(result.Block), next; have != want { - t.Fatalf("unexpected tracing block, have %d want %d", have, want) - } - if 
have, want := len(result.Traces), int(next); have != want { - t.Fatalf("unexpected result length, have %d want %d", have, want) - } + require.Equal(t, next, uint64(result.Block), "unexpected tracing block") + require.Len(t, result.Traces, int(next), "unexpected result length") for _, trace := range result.Traces { trace.TxHash = common.Hash{} blob, _ := json.Marshal(trace) - if have, want := string(blob), single; have != want { - t.Fatalf("unexpected tracing result, have\n%v\nwant:\n%v", have, want) - } + require.Equal(t, single, string(blob), "unexpected tracing result") } next += 1 } - if next != c.end+1 { - t.Error("Missing tracing block") - } + require.Equal(t, c.end+1, next, "Missing tracing block") - if nref, nrel := ref.Load(), rel.Load(); nref != nrel { - t.Errorf("Ref and deref actions are not equal, ref %d rel %d", nref, nrel) - } + nref, nrel := ref.Load(), rel.Load() + require.Equal(t, nrel, nref, "Ref and deref actions are not equal") } } @@ -448,21 +432,14 @@ func testTraceCallWithOverridesStateUpgrade(t *testing.T, scheme string) { require.ErrorIs(t, err, testspec.expectErr, "test %d", i) continue } else { - if err != nil { - t.Errorf("test %d: expect no error, got %v", i, err) - continue - } + require.NoError(t, err, "test %d: expect no error", i) var have *logger.ExecutionResult - if err := json.Unmarshal(result.(json.RawMessage), &have); err != nil { - t.Errorf("test %d: failed to unmarshal result %v", i, err) - } + err = json.Unmarshal(result.(json.RawMessage), &have) + require.NoError(t, err, "test %d: failed to unmarshal result", i) var want *logger.ExecutionResult - if err := json.Unmarshal([]byte(testspec.expect), &want); err != nil { - t.Errorf("test %d: failed to unmarshal result %v", i, err) - } - if !reflect.DeepEqual(have, want) { - t.Errorf("test %d: result mismatch, want %v, got %v", i, testspec.expect, string(result.(json.RawMessage))) - } + err = json.Unmarshal([]byte(testspec.expect), &want) + require.NoError(t, err, "test %d: 
failed to unmarshal result", i) + require.Equal(t, want, have, "test %d: result mismatch", i) } } } diff --git a/params/extras/precompile_upgrade_test.go b/params/extras/precompile_upgrade_test.go index 140611318c..49681faa71 100644 --- a/params/extras/precompile_upgrade_test.go +++ b/params/extras/precompile_upgrade_test.go @@ -288,9 +288,7 @@ func (tt *upgradeCompatibilityTest) run(t *testing.T, chainConfig ChainConfig) { // if this is not the final upgradeBytes, continue applying // the next upgradeBytes. (only check the result on the last apply) if i != len(tt.configs)-1 { - if err != nil { - t.Fatalf("expecting checkConfigCompatible call %d to return nil, got %s", i+1, err) - } + require.NoError(t, err, "expecting checkConfigCompatible call %d to return nil", i+1) chainConfig = newCfg continue } diff --git a/plugin/evm/customtypes/block_ext_test.go b/plugin/evm/customtypes/block_ext_test.go index 0985d2ead6..7b80f23fa2 100644 --- a/plugin/evm/customtypes/block_ext_test.go +++ b/plugin/evm/customtypes/block_ext_test.go @@ -4,6 +4,7 @@ package customtypes import ( + "fmt" "math/big" "reflect" "testing" @@ -12,6 +13,7 @@ import ( "github.com/ava-labs/avalanchego/vms/evm/acp226" "github.com/ava-labs/libevm/common" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/ava-labs/subnet-evm/internal/blocktest" "github.com/ava-labs/subnet-evm/utils" @@ -139,7 +141,7 @@ func exportedFieldsPointToDifferentMemory[T interface { case []uint8: assertDifferentPointers(t, unsafe.SliceData(f), unsafe.SliceData(fieldCp.([]uint8))) default: - t.Errorf("field %q type %T needs to be added to switch cases of exportedFieldsDeepCopied", field.Name, f) + require.Fail(t, fmt.Sprintf("field %q type %T needs to be added to switch cases of exportedFieldsDeepCopied", field.Name, f)) } }) } @@ -151,11 +153,11 @@ func assertDifferentPointers[T any](t *testing.T, a *T, b any) { t.Helper() switch { case a == nil: - t.Errorf("a (%T) cannot be nil", a) + 
require.Fail(t, fmt.Sprintf("a (%T) cannot be nil", a)) case b == nil: - t.Errorf("b (%T) cannot be nil", b) + require.Fail(t, fmt.Sprintf("b (%T) cannot be nil", b)) case a == b: - t.Errorf("pointers to same memory") + require.Fail(t, "pointers to same memory") } // Note: no need to check `b` is of the same type as `a`, otherwise // the memory address would be different as well. diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index 5d2da06f00..688f45b1df 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -129,7 +129,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { appSender.SendAppRequestF = func(ctx context.Context, nodeSet set.Set[ids.NodeID], requestID uint32, request []byte) error { nodeID, hasItem := nodeSet.Pop() if !hasItem { - t.Fatal("expected nodeSet to contain at least 1 nodeID") + require.Fail(t, "expected nodeSet to contain at least 1 nodeID") } go vmSetup.serverVM.AppRequest(ctx, nodeID, requestID, time.Now().Add(1*time.Second), request) return nil @@ -137,7 +137,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { // Reset metrics to allow re-initialization vmSetup.syncerVM.ctx.Metrics = metrics.NewPrefixGatherer() stateSyncDisabledConfigJSON := `{"state-sync-enabled":false}` - if err := syncDisabledVM.Initialize( + require.NoError(t, syncDisabledVM.Initialize( context.Background(), vmSetup.syncerVM.ctx, vmSetup.syncerDB, @@ -146,18 +146,14 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { []byte(stateSyncDisabledConfigJSON), []*commonEng.Fx{}, appSender, - ); err != nil { - t.Fatal(err) - } + )) defer func() { - if err := syncDisabledVM.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, syncDisabledVM.Shutdown(context.Background())) }() if height := syncDisabledVM.LastAcceptedBlockInternal().Height(); height != 0 { - t.Fatalf("Unexpected last accepted height: %d", height) + require.Fail(t, fmt.Sprintf("Unexpected last accepted 
height: %d", height)) } enabled, err := syncDisabledVM.StateSyncEnabled(context.Background()) @@ -168,28 +164,18 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { for i := uint64(1); i < 10; i++ { ethBlock := vmSetup.serverVM.blockChain.GetBlockByNumber(i) if ethBlock == nil { - t.Fatalf("VM Server did not have a block available at height %d", i) + require.Fail(t, fmt.Sprintf("VM Server did not have a block available at height %d", i)) } b, err := rlp.EncodeToBytes(ethBlock) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) blk, err := syncDisabledVM.ParseBlock(context.Background(), b) - if err != nil { - t.Fatal(err) - } - if err := blk.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if err := blk.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, blk.Verify(context.Background())) + require.NoError(t, blk.Accept(context.Background())) } // Verify the snapshot disk layer matches the last block root lastRoot := syncDisabledVM.blockChain.CurrentBlock().Root - if err := syncDisabledVM.blockChain.Snapshots().Verify(lastRoot); err != nil { - t.Fatal(err) - } + require.NoError(t, syncDisabledVM.blockChain.Snapshots().Verify(lastRoot)) syncDisabledVM.blockChain.DrainAcceptorQueue() // Create a new VM from the same database with state sync enabled. 
@@ -201,7 +187,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { ) // Reset metrics to allow re-initialization vmSetup.syncerVM.ctx.Metrics = metrics.NewPrefixGatherer() - if err := syncReEnabledVM.Initialize( + require.NoError(t, syncReEnabledVM.Initialize( context.Background(), vmSetup.syncerVM.ctx, vmSetup.syncerDB, @@ -210,9 +196,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { []byte(configJSON), []*commonEng.Fx{}, appSender, - ); err != nil { - t.Fatal(err) - } + )) // override [serverVM]'s SendAppResponse function to trigger AppResponse on [syncerVM] vmSetup.serverAppSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { @@ -291,7 +275,7 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s br := predicate.BlockResults{} b, err := br.Bytes() if err != nil { - t.Fatal(err) + panic(err) } gen.AppendExtra(b) @@ -478,7 +462,7 @@ func testSyncerVM(t *testing.T, vmSetup *syncVMSetup, test syncTest) { br := predicate.BlockResults{} b, err := br.Bytes() if err != nil { - t.Fatal(err) + panic(err) } gen.AppendExtra(b) i := 0 @@ -567,19 +551,11 @@ func generateAndAcceptBlocks(t *testing.T, vm *VM, numBlocks int, gen func(int, // generated by GenerateChain acceptExternalBlock := func(block *types.Block) { bytes, err := rlp.EncodeToBytes(block) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) vmBlock, err := vm.ParseBlock(context.Background(), bytes) - if err != nil { - t.Fatal(err) - } - if err := vmBlock.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if err := vmBlock.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.NoError(t, vmBlock.Verify(context.Background())) + require.NoError(t, vmBlock.Accept(context.Background())) if accepted != nil { accepted(block) @@ -598,9 +574,7 @@ func generateAndAcceptBlocks(t *testing.T, vm *VM, numBlocks int, gen func(int, gen(i, g) }, ) - if 
err != nil { - t.Fatal(err) - } + require.NoError(t, err) vm.blockChain.DrainAcceptorQueue() } diff --git a/plugin/evm/version_test.go b/plugin/evm/version_test.go index 8347ac8359..ecbb093f55 100644 --- a/plugin/evm/version_test.go +++ b/plugin/evm/version_test.go @@ -28,12 +28,9 @@ func TestCompatibility(t *testing.T) { require.NoError(t, err, "json decoding compatibility file") rpcChainVMVersion, valueInJSON := parsedCompat.RPCChainVMProtocolVersion[Version] - if !valueInJSON { - t.Fatalf("%s has subnet-evm version %s missing from rpcChainVMProtocolVersion object", - filepath.Base(compatibilityFile), Version) - } - if rpcChainVMVersion != version.RPCChainVMProtocol { - t.Fatalf("%s has subnet-evm version %s stated as compatible with RPC chain VM protocol version %d but AvalancheGo protocol version is %d", - filepath.Base(compatibilityFile), Version, rpcChainVMVersion, version.RPCChainVMProtocol) - } + require.True(t, valueInJSON, "%s has subnet-evm version %s missing from rpcChainVMProtocolVersion object", + filepath.Base(compatibilityFile), Version) + require.Equal(t, version.RPCChainVMProtocol, rpcChainVMVersion, + "%s has subnet-evm version %s stated as compatible with RPC chain VM protocol version %d but AvalancheGo protocol version is %d", + filepath.Base(compatibilityFile), Version, rpcChainVMVersion, version.RPCChainVMProtocol) } diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index e4172cfad3..313df6a209 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -347,21 +347,13 @@ func issueAndAccept(t *testing.T, vm *VM) snowman.Block { require.Equal(t, commonEng.PendingTxs, msg) blk, err := vm.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if err := blk.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, blk.Verify(context.Background())) - if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, 
vm.SetPreference(context.Background(), blk.ID())) - if err := blk.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, blk.Accept(context.Background())) return blk } @@ -389,35 +381,31 @@ func testBuildEthTxBlock(t *testing.T, scheme string) { tx := types.NewTransaction(uint64(0), key.Address, firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) errs := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range errs { if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) } } blk1 := issueAndAccept(t, tvm.vm) newHead := <-newTxPoolHeadChan if newHead.Head.Hash() != common.Hash(blk1.ID()) { - t.Fatalf("Expected new block to match") + require.Fail(t, "Expected new block to match") } txs := make([]*types.Transaction, 10) for i := 0; i < 10; i++ { tx := types.NewTransaction(uint64(i), key.Address, big.NewInt(10), 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), key.PrivateKey) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txs[i] = signedTx } errs = tvm.vm.txPool.AddRemotesSync(txs) for i, err := range errs { if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) } } @@ -425,50 +413,42 @@ func testBuildEthTxBlock(t *testing.T, scheme string) { blk2 := issueAndAccept(t, tvm.vm) newHead = <-newTxPoolHeadChan if newHead.Head.Hash() != common.Hash(blk2.ID()) { - t.Fatalf("Expected new block to match") + require.Fail(t, "Expected new block to match") } lastAcceptedID, err := tvm.vm.LastAccepted(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if 
lastAcceptedID != blk2.ID() { - t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blk2.ID(), lastAcceptedID) + require.Fail(t, fmt.Sprintf("Expected last accepted blockID to be the accepted block: %s, but found %s", blk2.ID(), lastAcceptedID)) } ethBlk1 := blk1.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock if ethBlk1Root := ethBlk1.Root(); !tvm.vm.blockChain.HasState(ethBlk1Root) { - t.Fatalf("Expected blk1 state root to not yet be pruned after blk2 was accepted because of tip buffer") + require.Fail(t, "Expected blk1 state root to not yet be pruned after blk2 was accepted because of tip buffer") } // Clear the cache and ensure that GetBlock returns internal blocks with the correct status tvm.vm.State.Flush() blk2Refreshed, err := tvm.vm.GetBlockInternal(context.Background(), blk2.ID()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) blk1RefreshedID := blk2Refreshed.Parent() blk1Refreshed, err := tvm.vm.GetBlockInternal(context.Background(), blk1RefreshedID) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if blk1Refreshed.ID() != blk1.ID() { - t.Fatalf("Found unexpected blkID for parent of blk2") + require.Fail(t, "Found unexpected blkID for parent of blk2") } // Close the vm and all databases - if err := tvm.vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.Shutdown(context.Background())) restartedVM := &VM{} newCTX := snowtest.Context(t, snowtest.CChainID) newCTX.NetworkUpgrades = upgradetest.GetConfig(fork) newCTX.ChainDataDir = tvm.vm.ctx.ChainDataDir conf := getConfig(scheme, "") - if err := restartedVM.Initialize( + require.NoError(t, restartedVM.Initialize( context.Background(), newCTX, tvm.db, @@ -477,25 +457,21 @@ func testBuildEthTxBlock(t *testing.T, scheme string) { []byte(conf), []*commonEng.Fx{}, nil, - ); err != nil { - t.Fatal(err) - } + )) // State root should not have been committed and discarded on restart if ethBlk1Root := 
ethBlk1.Root(); restartedVM.blockChain.HasState(ethBlk1Root) { - t.Fatalf("Expected blk1 state root to be pruned after blk2 was accepted on top of it in pruning mode") + require.Fail(t, "Expected blk1 state root to be pruned after blk2 was accepted on top of it in pruning mode") } // State root should be committed when accepted tip on shutdown ethBlk2 := blk2.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock if ethBlk2Root := ethBlk2.Root(); !restartedVM.blockChain.HasState(ethBlk2Root) { - t.Fatalf("Expected blk2 state root to not be pruned after shutdown (last accepted tip should be committed)") + require.Fail(t, "Expected blk2 state root to not be pruned after shutdown (last accepted tip should be committed)") } // Shutdown the newest VM - if err := restartedVM.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, restartedVM.Shutdown(context.Background())) } // Regression test to ensure that after accepting block A @@ -530,13 +506,9 @@ func testSetPreferenceRace(t *testing.T, scheme string) { vm2 := tvm2.vm defer func() { - if err := vm1.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.Shutdown(context.Background())) - if err := vm2.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm2.Shutdown(context.Background())) }() newTxPoolHeadChan1 := make(chan core.NewTxPoolReorgEvent, 1) @@ -546,14 +518,12 @@ func testSetPreferenceRace(t *testing.T, scheme string) { tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txErrors := vm1.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add tx at 
index %d: %s", i, err)) } } @@ -562,43 +532,27 @@ func testSetPreferenceRace(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm1BlkA, err := vm1.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Failed to build block with import transaction: %s", err) - } + require.NoError(t, err, "Failed to build block with import transaction") - if err := vm1BlkA.Verify(context.Background()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } + require.NoError(t, vm1BlkA.Verify(context.Background()), "Block failed verification on VM1") - if err := vm1.SetPreference(context.Background(), vm1BlkA.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.SetPreference(context.Background(), vm1BlkA.ID())) vm2BlkA, err := vm2.ParseBlock(context.Background(), vm1BlkA.Bytes()) - if err != nil { - t.Fatalf("Unexpected error parsing block from vm2: %s", err) - } - if err := vm2BlkA.Verify(context.Background()); err != nil { - t.Fatalf("Block failed verification on VM2: %s", err) - } - if err := vm2.SetPreference(context.Background(), vm2BlkA.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, err, "Unexpected error parsing block from vm2") + require.NoError(t, vm2BlkA.Verify(context.Background()), "Block failed verification on VM2") + require.NoError(t, vm2.SetPreference(context.Background(), vm2BlkA.ID())) - if err := vm1BlkA.Accept(context.Background()); err != nil { - t.Fatalf("VM1 failed to accept block: %s", err) - } - if err := vm2BlkA.Accept(context.Background()); err != nil { - t.Fatalf("VM2 failed to accept block: %s", err) - } + require.NoError(t, vm1BlkA.Accept(context.Background()), "VM1 failed to accept block") + require.NoError(t, vm2BlkA.Accept(context.Background()), "VM2 failed to accept block") newHead := <-newTxPoolHeadChan1 if newHead.Head.Hash() != common.Hash(vm1BlkA.ID()) { - t.Fatalf("Expected new block to match") + require.Fail(t, "Expected new block to match") } newHead = 
<-newTxPoolHeadChan2 if newHead.Head.Hash() != common.Hash(vm2BlkA.ID()) { - t.Fatalf("Expected new block to match") + require.Fail(t, "Expected new block to match") } // Create list of 10 successive transactions to build block A on vm1 @@ -607,9 +561,7 @@ func testSetPreferenceRace(t *testing.T, scheme string) { for i := 0; i < 10; i++ { tx := types.NewTransaction(uint64(i), testEthAddrs[0], big.NewInt(10), 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[1].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txs[i] = signedTx } @@ -619,7 +571,7 @@ func testSetPreferenceRace(t *testing.T, scheme string) { errs = vm1.txPool.AddRemotesSync(txs) for i, err := range errs { if err != nil { - t.Fatalf("Failed to add transaction to VM1 at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add transaction to VM1 at index %d: %s", i, err)) } } @@ -628,17 +580,11 @@ func testSetPreferenceRace(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm1BlkB, err := vm1.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if err := vm1BlkB.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1BlkB.Verify(context.Background())) - if err := vm1.SetPreference(context.Background(), vm1BlkB.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.SetPreference(context.Background(), vm1BlkB.ID())) // Split the transactions over two blocks, and set VM2's preference to them in sequence // after building each block @@ -646,7 +592,7 @@ func testSetPreferenceRace(t *testing.T, scheme string) { errs = vm2.txPool.AddRemotesSync(txs[0:5]) for i, err := range errs { if err != nil { - t.Fatalf("Failed to add transaction to VM2 at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add transaction to VM2 at index %d: %s", i, err)) } } @@ -655,28 +601,22 @@ func 
testSetPreferenceRace(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm2BlkC, err := vm2.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Failed to build BlkC on VM2: %s", err) - } + require.NoError(t, err, "Failed to build BlkC on VM2") - if err := vm2BlkC.Verify(context.Background()); err != nil { - t.Fatalf("BlkC failed verification on VM2: %s", err) - } + require.NoError(t, vm2BlkC.Verify(context.Background()), "BlkC failed verification on VM2") - if err := vm2.SetPreference(context.Background(), vm2BlkC.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm2.SetPreference(context.Background(), vm2BlkC.ID())) newHead = <-newTxPoolHeadChan2 if newHead.Head.Hash() != common.Hash(vm2BlkC.ID()) { - t.Fatalf("Expected new block to match") + require.Fail(t, "Expected new block to match") } // Block D errs = vm2.txPool.AddRemotesSync(txs[5:10]) for i, err := range errs { if err != nil { - t.Fatalf("Failed to add transaction to VM2 at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add transaction to VM2 at index %d: %s", i, err)) } } @@ -684,17 +624,11 @@ func testSetPreferenceRace(t *testing.T, scheme string) { require.NoError(t, err) require.Equal(t, commonEng.PendingTxs, msg) vm2BlkD, err := vm2.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Failed to build BlkD on VM2: %s", err) - } + require.NoError(t, err, "Failed to build BlkD on VM2") - if err := vm2BlkD.Verify(context.Background()); err != nil { - t.Fatalf("BlkD failed verification on VM2: %s", err) - } + require.NoError(t, vm2BlkD.Verify(context.Background()), "BlkD failed verification on VM2") - if err := vm2.SetPreference(context.Background(), vm2BlkD.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm2.SetPreference(context.Background(), vm2BlkD.ID())) // VM1 receives blkC and blkD from VM1 // and happens to call SetPreference on blkD without ever calling SetPreference @@ -703,54 +637,32 @@ func 
testSetPreferenceRace(t *testing.T, scheme string) { // back to the last accepted block as would typically be the case in the consensus // engine vm1BlkD, err := vm1.ParseBlock(context.Background(), vm2BlkD.Bytes()) - if err != nil { - t.Fatalf("VM1 errored parsing blkD: %s", err) - } + require.NoError(t, err, "VM1 errored parsing blkD") vm1BlkC, err := vm1.ParseBlock(context.Background(), vm2BlkC.Bytes()) - if err != nil { - t.Fatalf("VM1 errored parsing blkC: %s", err) - } + require.NoError(t, err, "VM1 errored parsing blkC") // The blocks must be verified in order. This invariant is maintained // in the consensus engine. - if err := vm1BlkC.Verify(context.Background()); err != nil { - t.Fatalf("VM1 BlkC failed verification: %s", err) - } - if err := vm1BlkD.Verify(context.Background()); err != nil { - t.Fatalf("VM1 BlkD failed verification: %s", err) - } + require.NoError(t, vm1BlkC.Verify(context.Background()), "VM1 BlkC failed verification") + require.NoError(t, vm1BlkD.Verify(context.Background()), "VM1 BlkD failed verification") // Set VM1's preference to blockD, skipping blockC - if err := vm1.SetPreference(context.Background(), vm1BlkD.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.SetPreference(context.Background(), vm1BlkD.ID())) // Accept the longer chain on both VMs and ensure there are no errors // VM1 Accepts the blocks in order - if err := vm1BlkC.Accept(context.Background()); err != nil { - t.Fatalf("VM1 BlkC failed on accept: %s", err) - } - if err := vm1BlkD.Accept(context.Background()); err != nil { - t.Fatalf("VM1 BlkC failed on accept: %s", err) - } + require.NoError(t, vm1BlkC.Accept(context.Background()), "VM1 BlkC failed on accept") + require.NoError(t, vm1BlkD.Accept(context.Background()), "VM1 BlkC failed on accept") // VM2 Accepts the blocks in order - if err := vm2BlkC.Accept(context.Background()); err != nil { - t.Fatalf("VM2 BlkC failed on accept: %s", err) - } - if err := vm2BlkD.Accept(context.Background()); err != 
nil { - t.Fatalf("VM2 BlkC failed on accept: %s", err) - } + require.NoError(t, vm2BlkC.Accept(context.Background()), "VM2 BlkC failed on accept") + require.NoError(t, vm2BlkD.Accept(context.Background()), "VM2 BlkC failed on accept") log.Info("Validating canonical chain") // Verify the Canonical Chain for Both VMs - if err := vm2.blockChain.ValidateCanonicalChain(); err != nil { - t.Fatalf("VM2 failed canonical chain verification due to: %s", err) - } + require.NoError(t, vm2.blockChain.ValidateCanonicalChain(), "VM2 failed canonical chain verification due to") - if err := vm1.blockChain.ValidateCanonicalChain(); err != nil { - t.Fatalf("VM1 failed canonical chain verification due to: %s", err) - } + require.NoError(t, vm1.blockChain.ValidateCanonicalChain(), "VM1 failed canonical chain verification due to") } // Regression test to ensure that a VM that accepts block A and B @@ -786,13 +698,9 @@ func testReorgProtection(t *testing.T, scheme string) { vm2 := tvm2.vm defer func() { - if err := vm1.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.Shutdown(context.Background())) - if err := vm2.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm2.Shutdown(context.Background())) }() newTxPoolHeadChan1 := make(chan core.NewTxPoolReorgEvent, 1) @@ -802,14 +710,12 @@ func testReorgProtection(t *testing.T, scheme string) { tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txErrors := vm1.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) } } @@ -818,43 +724,27 @@ func testReorgProtection(t 
*testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm1BlkA, err := vm1.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Failed to build block with import transaction: %s", err) - } + require.NoError(t, err, "Failed to build block with import transaction") - if err := vm1BlkA.Verify(context.Background()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } + require.NoError(t, vm1BlkA.Verify(context.Background()), "Block failed verification on VM1") - if err := vm1.SetPreference(context.Background(), vm1BlkA.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.SetPreference(context.Background(), vm1BlkA.ID())) vm2BlkA, err := vm2.ParseBlock(context.Background(), vm1BlkA.Bytes()) - if err != nil { - t.Fatalf("Unexpected error parsing block from vm2: %s", err) - } - if err := vm2BlkA.Verify(context.Background()); err != nil { - t.Fatalf("Block failed verification on VM2: %s", err) - } - if err := vm2.SetPreference(context.Background(), vm2BlkA.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, err, "Unexpected error parsing block from vm2") + require.NoError(t, vm2BlkA.Verify(context.Background()), "Block failed verification on VM2") + require.NoError(t, vm2.SetPreference(context.Background(), vm2BlkA.ID())) - if err := vm1BlkA.Accept(context.Background()); err != nil { - t.Fatalf("VM1 failed to accept block: %s", err) - } - if err := vm2BlkA.Accept(context.Background()); err != nil { - t.Fatalf("VM2 failed to accept block: %s", err) - } + require.NoError(t, vm1BlkA.Accept(context.Background()), "VM1 failed to accept block") + require.NoError(t, vm2BlkA.Accept(context.Background()), "VM2 failed to accept block") newHead := <-newTxPoolHeadChan1 if newHead.Head.Hash() != common.Hash(vm1BlkA.ID()) { - t.Fatalf("Expected new block to match") + require.Fail(t, "Expected new block to match") } newHead = <-newTxPoolHeadChan2 if newHead.Head.Hash() != common.Hash(vm2BlkA.ID()) { - t.Fatalf("Expected 
new block to match") + require.Fail(t, "Expected new block to match") } // Create list of 10 successive transactions to build block A on vm1 @@ -863,9 +753,7 @@ func testReorgProtection(t *testing.T, scheme string) { for i := 0; i < 10; i++ { tx := types.NewTransaction(uint64(i), testEthAddrs[0], big.NewInt(10), 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[1].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txs[i] = signedTx } @@ -875,7 +763,7 @@ func testReorgProtection(t *testing.T, scheme string) { errs = vm1.txPool.AddRemotesSync(txs) for i, err := range errs { if err != nil { - t.Fatalf("Failed to add transaction to VM1 at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add transaction to VM1 at index %d: %s", i, err)) } } @@ -884,17 +772,11 @@ func testReorgProtection(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm1BlkB, err := vm1.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if err := vm1BlkB.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1BlkB.Verify(context.Background())) - if err := vm1.SetPreference(context.Background(), vm1BlkB.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.SetPreference(context.Background(), vm1BlkB.ID())) // Split the transactions over two blocks, and set VM2's preference to them in sequence // after building each block @@ -902,7 +784,7 @@ func testReorgProtection(t *testing.T, scheme string) { errs = vm2.txPool.AddRemotesSync(txs[0:5]) for i, err := range errs { if err != nil { - t.Fatalf("Failed to add transaction to VM2 at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add transaction to VM2 at index %d: %s", i, err)) } } @@ -911,38 +793,28 @@ func testReorgProtection(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm2BlkC, err 
:= vm2.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Failed to build BlkC on VM2: %s", err) - } + require.NoError(t, err, "Failed to build BlkC on VM2") - if err := vm2BlkC.Verify(context.Background()); err != nil { - t.Fatalf("Block failed verification on VM2: %s", err) - } + require.NoError(t, vm2BlkC.Verify(context.Background()), "Block failed verification on VM2") vm1BlkC, err := vm1.ParseBlock(context.Background(), vm2BlkC.Bytes()) - if err != nil { - t.Fatalf("Unexpected error parsing block from vm2: %s", err) - } + require.NoError(t, err, "Unexpected error parsing block from vm2") - if err := vm1BlkC.Verify(context.Background()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } + require.NoError(t, vm1BlkC.Verify(context.Background()), "Block failed verification on VM1") // Accept B, such that block C should get Rejected. - if err := vm1BlkB.Accept(context.Background()); err != nil { - t.Fatalf("VM1 failed to accept block: %s", err) - } + require.NoError(t, vm1BlkB.Accept(context.Background()), "VM1 failed to accept block") // The below (setting preference blocks that have a common ancestor // with the preferred chain lower than the last finalized block) // should NEVER happen. However, the VM defends against this // just in case. 
if err := vm1.SetPreference(context.Background(), vm1BlkC.ID()); !strings.Contains(err.Error(), "cannot orphan finalized block") { - t.Fatalf("Unexpected error when setting preference that would trigger reorg: %s", err) + require.NoError(t, err, "Unexpected error when setting preference that would trigger reorg") } if err := vm1BlkC.Accept(context.Background()); !strings.Contains(err.Error(), "expected accepted block to have parent") { - t.Fatalf("Unexpected error when setting block at finalized height: %s", err) + require.NoError(t, err, "Unexpected error when setting block at finalized height") } } @@ -972,13 +844,9 @@ func testNonCanonicalAccept(t *testing.T, scheme string) { vm2 := tvm2.vm defer func() { - if err := vm1.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.Shutdown(context.Background())) - if err := vm2.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm2.Shutdown(context.Background())) }() newTxPoolHeadChan1 := make(chan core.NewTxPoolReorgEvent, 1) @@ -988,14 +856,12 @@ func testNonCanonicalAccept(t *testing.T, scheme string) { tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txErrors := vm1.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) } } @@ -1004,60 +870,44 @@ func testNonCanonicalAccept(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm1BlkA, err := vm1.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Failed to build block with import transaction: %s", err) - } + require.NoError(t, err, "Failed to build block with import 
transaction") - if err := vm1BlkA.Verify(context.Background()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } + require.NoError(t, vm1BlkA.Verify(context.Background()), "Block failed verification on VM1") if _, err := vm1.GetBlockIDAtHeight(context.Background(), vm1BlkA.Height()); err != database.ErrNotFound { - t.Fatalf("Expected unaccepted block not to be indexed by height, but found %s", err) + require.Fail(t, fmt.Sprintf("Expected unaccepted block not to be indexed by height, but found %s", err)) } - if err := vm1.SetPreference(context.Background(), vm1BlkA.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.SetPreference(context.Background(), vm1BlkA.ID())) vm2BlkA, err := vm2.ParseBlock(context.Background(), vm1BlkA.Bytes()) - if err != nil { - t.Fatalf("Unexpected error parsing block from vm2: %s", err) - } - if err := vm2BlkA.Verify(context.Background()); err != nil { - t.Fatalf("Block failed verification on VM2: %s", err) - } + require.NoError(t, err, "Unexpected error parsing block from vm2") + require.NoError(t, vm2BlkA.Verify(context.Background()), "Block failed verification on VM2") if _, err := vm2.GetBlockIDAtHeight(context.Background(), vm2BlkA.Height()); err != database.ErrNotFound { - t.Fatalf("Expected unaccepted block not to be indexed by height, but found %s", err) - } - if err := vm2.SetPreference(context.Background(), vm2BlkA.ID()); err != nil { - t.Fatal(err) + require.Fail(t, fmt.Sprintf("Expected unaccepted block not to be indexed by height, but found %s", err)) } + require.NoError(t, vm2.SetPreference(context.Background(), vm2BlkA.ID())) - if err := vm1BlkA.Accept(context.Background()); err != nil { - t.Fatalf("VM1 failed to accept block: %s", err) - } + require.NoError(t, vm1BlkA.Accept(context.Background()), "VM1 failed to accept block") if blkID, err := vm1.GetBlockIDAtHeight(context.Background(), vm1BlkA.Height()); err != nil { - t.Fatalf("Height lookuped failed on accepted block: %s", err) + 
require.NoError(t, err, "Height lookuped failed on accepted block") } else if blkID != vm1BlkA.ID() { - t.Fatalf("Expected accepted block to be indexed by height, but found %s", blkID) - } - if err := vm2BlkA.Accept(context.Background()); err != nil { - t.Fatalf("VM2 failed to accept block: %s", err) + require.Fail(t, fmt.Sprintf("Expected accepted block to be indexed by height, but found %s", blkID)) } + require.NoError(t, vm2BlkA.Accept(context.Background()), "VM2 failed to accept block") if blkID, err := vm2.GetBlockIDAtHeight(context.Background(), vm2BlkA.Height()); err != nil { - t.Fatalf("Height lookuped failed on accepted block: %s", err) + require.NoError(t, err, "Height lookuped failed on accepted block") } else if blkID != vm2BlkA.ID() { - t.Fatalf("Expected accepted block to be indexed by height, but found %s", blkID) + require.Fail(t, fmt.Sprintf("Expected accepted block to be indexed by height, but found %s", blkID)) } newHead := <-newTxPoolHeadChan1 if newHead.Head.Hash() != common.Hash(vm1BlkA.ID()) { - t.Fatalf("Expected new block to match") + require.Fail(t, "Expected new block to match") } newHead = <-newTxPoolHeadChan2 if newHead.Head.Hash() != common.Hash(vm2BlkA.ID()) { - t.Fatalf("Expected new block to match") + require.Fail(t, "Expected new block to match") } // Create list of 10 successive transactions to build block A on vm1 @@ -1066,9 +916,7 @@ func testNonCanonicalAccept(t *testing.T, scheme string) { for i := 0; i < 10; i++ { tx := types.NewTransaction(uint64(i), testEthAddrs[0], big.NewInt(10), 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[1].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txs[i] = signedTx } @@ -1078,7 +926,7 @@ func testNonCanonicalAccept(t *testing.T, scheme string) { errs = vm1.txPool.AddRemotesSync(txs) for i, err := range errs { if err != nil { - t.Fatalf("Failed to add transaction to VM1 at index %d: 
%s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add transaction to VM1 at index %d: %s", i, err)) } } @@ -1087,32 +935,26 @@ func testNonCanonicalAccept(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm1BlkB, err := vm1.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if err := vm1BlkB.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1BlkB.Verify(context.Background())) if _, err := vm1.GetBlockIDAtHeight(context.Background(), vm1BlkB.Height()); err != database.ErrNotFound { - t.Fatalf("Expected unaccepted block not to be indexed by height, but found %s", err) + require.Fail(t, fmt.Sprintf("Expected unaccepted block not to be indexed by height, but found %s", err)) } - if err := vm1.SetPreference(context.Background(), vm1BlkB.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.SetPreference(context.Background(), vm1BlkB.ID())) blkBHeight := vm1BlkB.Height() blkBHash := vm1BlkB.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkBHash { - t.Fatalf("expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), b.Hash().Hex()) + require.Fail(t, fmt.Sprintf("expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), b.Hash().Hex())) } errs = vm2.txPool.AddRemotesSync(txs[0:5]) for i, err := range errs { if err != nil { - t.Fatalf("Failed to add transaction to VM2 at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add transaction to VM2 at index %d: %s", i, err)) } } @@ -1121,36 +963,28 @@ func testNonCanonicalAccept(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm2BlkC, err := vm2.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Failed to build BlkC on VM2: %s", err) - } + require.NoError(t, err, "Failed to build BlkC on VM2") vm1BlkC, err := 
vm1.ParseBlock(context.Background(), vm2BlkC.Bytes()) - if err != nil { - t.Fatalf("Unexpected error parsing block from vm2: %s", err) - } + require.NoError(t, err, "Unexpected error parsing block from vm2") - if err := vm1BlkC.Verify(context.Background()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } + require.NoError(t, vm1BlkC.Verify(context.Background()), "Block failed verification on VM1") if _, err := vm1.GetBlockIDAtHeight(context.Background(), vm1BlkC.Height()); err != database.ErrNotFound { - t.Fatalf("Expected unaccepted block not to be indexed by height, but found %s", err) + require.Fail(t, fmt.Sprintf("Expected unaccepted block not to be indexed by height, but found %s", err)) } - if err := vm1BlkC.Accept(context.Background()); err != nil { - t.Fatalf("VM1 failed to accept block: %s", err) - } + require.NoError(t, vm1BlkC.Accept(context.Background()), "VM1 failed to accept block") if blkID, err := vm1.GetBlockIDAtHeight(context.Background(), vm1BlkC.Height()); err != nil { - t.Fatalf("Height lookuped failed on accepted block: %s", err) + require.NoError(t, err, "Height lookuped failed on accepted block") } else if blkID != vm1BlkC.ID() { - t.Fatalf("Expected accepted block to be indexed by height, but found %s", blkID) + require.Fail(t, fmt.Sprintf("Expected accepted block to be indexed by height, but found %s", blkID)) } blkCHash := vm1BlkC.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkCHash { - t.Fatalf("expected block at %d to have hash %s but got %s", blkBHeight, blkCHash.Hex(), b.Hash().Hex()) + require.Fail(t, fmt.Sprintf("expected block at %d to have hash %s but got %s", blkBHeight, blkCHash.Hex(), b.Hash().Hex())) } } @@ -1183,13 +1017,9 @@ func testStickyPreference(t *testing.T, scheme string) { vm2 := tvm2.vm defer func() { - if err := vm1.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, 
vm1.Shutdown(context.Background())) - if err := vm2.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm2.Shutdown(context.Background())) }() newTxPoolHeadChan1 := make(chan core.NewTxPoolReorgEvent, 1) @@ -1199,14 +1029,12 @@ func testStickyPreference(t *testing.T, scheme string) { tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txErrors := vm1.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) } } @@ -1215,43 +1043,27 @@ func testStickyPreference(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm1BlkA, err := vm1.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Failed to build block with import transaction: %s", err) - } + require.NoError(t, err, "Failed to build block with import transaction") - if err := vm1BlkA.Verify(context.Background()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } + require.NoError(t, vm1BlkA.Verify(context.Background()), "Block failed verification on VM1") - if err := vm1.SetPreference(context.Background(), vm1BlkA.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.SetPreference(context.Background(), vm1BlkA.ID())) vm2BlkA, err := vm2.ParseBlock(context.Background(), vm1BlkA.Bytes()) - if err != nil { - t.Fatalf("Unexpected error parsing block from vm2: %s", err) - } - if err := vm2BlkA.Verify(context.Background()); err != nil { - t.Fatalf("Block failed verification on VM2: %s", err) - } - if err := vm2.SetPreference(context.Background(), vm2BlkA.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, err, 
"Unexpected error parsing block from vm2") + require.NoError(t, vm2BlkA.Verify(context.Background()), "Block failed verification on VM2") + require.NoError(t, vm2.SetPreference(context.Background(), vm2BlkA.ID())) - if err := vm1BlkA.Accept(context.Background()); err != nil { - t.Fatalf("VM1 failed to accept block: %s", err) - } - if err := vm2BlkA.Accept(context.Background()); err != nil { - t.Fatalf("VM2 failed to accept block: %s", err) - } + require.NoError(t, vm1BlkA.Accept(context.Background()), "VM1 failed to accept block") + require.NoError(t, vm2BlkA.Accept(context.Background()), "VM2 failed to accept block") newHead := <-newTxPoolHeadChan1 if newHead.Head.Hash() != common.Hash(vm1BlkA.ID()) { - t.Fatalf("Expected new block to match") + require.Fail(t, "Expected new block to match") } newHead = <-newTxPoolHeadChan2 if newHead.Head.Hash() != common.Hash(vm2BlkA.ID()) { - t.Fatalf("Expected new block to match") + require.Fail(t, "Expected new block to match") } // Create list of 10 successive transactions to build block A on vm1 @@ -1260,9 +1072,7 @@ func testStickyPreference(t *testing.T, scheme string) { for i := 0; i < 10; i++ { tx := types.NewTransaction(uint64(i), testEthAddrs[0], big.NewInt(10), 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[1].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txs[i] = signedTx } @@ -1272,7 +1082,7 @@ func testStickyPreference(t *testing.T, scheme string) { errs = vm1.txPool.AddRemotesSync(txs) for i, err := range errs { if err != nil { - t.Fatalf("Failed to add transaction to VM1 at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add transaction to VM1 at index %d: %s", i, err)) } } @@ -1281,28 +1091,22 @@ func testStickyPreference(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm1BlkB, err := vm1.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } + 
require.NoError(t, err) - if err := vm1BlkB.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1BlkB.Verify(context.Background())) - if err := vm1.SetPreference(context.Background(), vm1BlkB.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.SetPreference(context.Background(), vm1BlkB.ID())) blkBHeight := vm1BlkB.Height() blkBHash := vm1BlkB.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkBHash { - t.Fatalf("expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), b.Hash().Hex()) + require.Fail(t, fmt.Sprintf("expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), b.Hash().Hex())) } errs = vm2.txPool.AddRemotesSync(txs[0:5]) for i, err := range errs { if err != nil { - t.Fatalf("Failed to add transaction to VM2 at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add transaction to VM2 at index %d: %s", i, err)) } } @@ -1311,27 +1115,21 @@ func testStickyPreference(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm2BlkC, err := vm2.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Failed to build BlkC on VM2: %s", err) - } + require.NoError(t, err, "Failed to build BlkC on VM2") - if err := vm2BlkC.Verify(context.Background()); err != nil { - t.Fatalf("BlkC failed verification on VM2: %s", err) - } + require.NoError(t, vm2BlkC.Verify(context.Background()), "BlkC failed verification on VM2") - if err := vm2.SetPreference(context.Background(), vm2BlkC.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm2.SetPreference(context.Background(), vm2BlkC.ID())) newHead = <-newTxPoolHeadChan2 if newHead.Head.Hash() != common.Hash(vm2BlkC.ID()) { - t.Fatalf("Expected new block to match") + require.Fail(t, "Expected new block to match") } errs = vm2.txPool.AddRemotesSync(txs[5:]) for i, err := range errs { if err != nil { - t.Fatalf("Failed to 
add transaction to VM2 at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add transaction to VM2 at index %d: %s", i, err)) } } @@ -1340,95 +1138,75 @@ func testStickyPreference(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm2BlkD, err := vm2.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Failed to build BlkD on VM2: %s", err) - } + require.NoError(t, err, "Failed to build BlkD on VM2") // Parse blocks produced in vm2 vm1BlkC, err := vm1.ParseBlock(context.Background(), vm2BlkC.Bytes()) - if err != nil { - t.Fatalf("Unexpected error parsing block from vm2: %s", err) - } + require.NoError(t, err, "Unexpected error parsing block from vm2") blkCHash := vm1BlkC.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() vm1BlkD, err := vm1.ParseBlock(context.Background(), vm2BlkD.Bytes()) - if err != nil { - t.Fatalf("Unexpected error parsing block from vm2: %s", err) - } + require.NoError(t, err, "Unexpected error parsing block from vm2") blkDHeight := vm1BlkD.Height() blkDHash := vm1BlkD.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() // Should be no-ops - if err := vm1BlkC.Verify(context.Background()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } - if err := vm1BlkD.Verify(context.Background()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } + require.NoError(t, vm1BlkC.Verify(context.Background()), "Block failed verification on VM1") + require.NoError(t, vm1BlkD.Verify(context.Background()), "Block failed verification on VM1") if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkBHash { - t.Fatalf("expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), b.Hash().Hex()) + require.Fail(t, fmt.Sprintf("expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), b.Hash().Hex())) } if b := vm1.blockChain.GetBlockByNumber(blkDHeight); b != nil { - t.Fatalf("expected block at %d to be nil but 
got %s", blkDHeight, b.Hash().Hex()) + require.Fail(t, fmt.Sprintf("expected block at %d to be nil but got %s", blkDHeight, b.Hash().Hex())) } if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkBHash { - t.Fatalf("expected current block to have hash %s but got %s", blkBHash.Hex(), b.Hash().Hex()) + require.Fail(t, fmt.Sprintf("expected current block to have hash %s but got %s", blkBHash.Hex(), b.Hash().Hex())) } // Should still be no-ops on re-verify - if err := vm1BlkC.Verify(context.Background()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } - if err := vm1BlkD.Verify(context.Background()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } + require.NoError(t, vm1BlkC.Verify(context.Background()), "Block failed verification on VM1") + require.NoError(t, vm1BlkD.Verify(context.Background()), "Block failed verification on VM1") if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkBHash { - t.Fatalf("expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), b.Hash().Hex()) + require.Fail(t, fmt.Sprintf("expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), b.Hash().Hex())) } if b := vm1.blockChain.GetBlockByNumber(blkDHeight); b != nil { - t.Fatalf("expected block at %d to be nil but got %s", blkDHeight, b.Hash().Hex()) + require.Fail(t, fmt.Sprintf("expected block at %d to be nil but got %s", blkDHeight, b.Hash().Hex())) } if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkBHash { - t.Fatalf("expected current block to have hash %s but got %s", blkBHash.Hex(), b.Hash().Hex()) + require.Fail(t, fmt.Sprintf("expected current block to have hash %s but got %s", blkBHash.Hex(), b.Hash().Hex())) } // Should be queryable after setting preference to side chain - if err := vm1.SetPreference(context.Background(), vm1BlkD.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.SetPreference(context.Background(), vm1BlkD.ID())) if b := 
vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkCHash { - t.Fatalf("expected block at %d to have hash %s but got %s", blkBHeight, blkCHash.Hex(), b.Hash().Hex()) + require.Fail(t, fmt.Sprintf("expected block at %d to have hash %s but got %s", blkBHeight, blkCHash.Hex(), b.Hash().Hex())) } if b := vm1.blockChain.GetBlockByNumber(blkDHeight); b.Hash() != blkDHash { - t.Fatalf("expected block at %d to have hash %s but got %s", blkDHeight, blkDHash.Hex(), b.Hash().Hex()) + require.Fail(t, fmt.Sprintf("expected block at %d to have hash %s but got %s", blkDHeight, blkDHash.Hex(), b.Hash().Hex())) } if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkDHash { - t.Fatalf("expected current block to have hash %s but got %s", blkDHash.Hex(), b.Hash().Hex()) + require.Fail(t, fmt.Sprintf("expected current block to have hash %s but got %s", blkDHash.Hex(), b.Hash().Hex())) } // Attempt to accept out of order if err := vm1BlkD.Accept(context.Background()); !strings.Contains(err.Error(), "expected accepted block to have parent") { - t.Fatalf("unexpected error when accepting out of order block: %s", err) + require.NoError(t, err, "unexpected error when accepting out of order block") } // Accept in order - if err := vm1BlkC.Accept(context.Background()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } - if err := vm1BlkD.Accept(context.Background()); err != nil { - t.Fatalf("Block failed acceptance on VM1: %s", err) - } + require.NoError(t, vm1BlkC.Accept(context.Background()), "Block failed verification on VM1") + require.NoError(t, vm1BlkD.Accept(context.Background()), "Block failed acceptance on VM1") // Ensure queryable after accepting if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkCHash { - t.Fatalf("expected block at %d to have hash %s but got %s", blkBHeight, blkCHash.Hex(), b.Hash().Hex()) + require.Fail(t, fmt.Sprintf("expected block at %d to have hash %s but got %s", blkBHeight, blkCHash.Hex(), b.Hash().Hex())) } 
if b := vm1.blockChain.GetBlockByNumber(blkDHeight); b.Hash() != blkDHash { - t.Fatalf("expected block at %d to have hash %s but got %s", blkDHeight, blkDHash.Hex(), b.Hash().Hex()) + require.Fail(t, fmt.Sprintf("expected block at %d to have hash %s but got %s", blkDHeight, blkDHash.Hex(), b.Hash().Hex())) } if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkDHash { - t.Fatalf("expected current block to have hash %s but got %s", blkDHash.Hex(), b.Hash().Hex()) + require.Fail(t, fmt.Sprintf("expected current block to have hash %s but got %s", blkDHash.Hex(), b.Hash().Hex())) } } @@ -1461,12 +1239,8 @@ func testUncleBlock(t *testing.T, scheme string) { vm2 := tvm2.vm defer func() { - if err := vm1.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - if err := vm2.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.Shutdown(context.Background())) + require.NoError(t, vm2.Shutdown(context.Background())) }() newTxPoolHeadChan1 := make(chan core.NewTxPoolReorgEvent, 1) @@ -1476,14 +1250,12 @@ func testUncleBlock(t *testing.T, scheme string) { tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txErrors := vm1.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) } } @@ -1492,52 +1264,34 @@ func testUncleBlock(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm1BlkA, err := vm1.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Failed to build block with import transaction: %s", err) - } + require.NoError(t, err, "Failed to build block with import transaction") - if err := 
vm1BlkA.Verify(context.Background()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } + require.NoError(t, vm1BlkA.Verify(context.Background()), "Block failed verification on VM1") - if err := vm1.SetPreference(context.Background(), vm1BlkA.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.SetPreference(context.Background(), vm1BlkA.ID())) vm2BlkA, err := vm2.ParseBlock(context.Background(), vm1BlkA.Bytes()) - if err != nil { - t.Fatalf("Unexpected error parsing block from vm2: %s", err) - } - if err := vm2BlkA.Verify(context.Background()); err != nil { - t.Fatalf("Block failed verification on VM2: %s", err) - } - if err := vm2.SetPreference(context.Background(), vm2BlkA.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, err, "Unexpected error parsing block from vm2") + require.NoError(t, vm2BlkA.Verify(context.Background()), "Block failed verification on VM2") + require.NoError(t, vm2.SetPreference(context.Background(), vm2BlkA.ID())) - if err := vm1BlkA.Accept(context.Background()); err != nil { - t.Fatalf("VM1 failed to accept block: %s", err) - } - if err := vm2BlkA.Accept(context.Background()); err != nil { - t.Fatalf("VM2 failed to accept block: %s", err) - } + require.NoError(t, vm1BlkA.Accept(context.Background()), "VM1 failed to accept block") + require.NoError(t, vm2BlkA.Accept(context.Background()), "VM2 failed to accept block") newHead := <-newTxPoolHeadChan1 if newHead.Head.Hash() != common.Hash(vm1BlkA.ID()) { - t.Fatalf("Expected new block to match") + require.Fail(t, "Expected new block to match") } newHead = <-newTxPoolHeadChan2 if newHead.Head.Hash() != common.Hash(vm2BlkA.ID()) { - t.Fatalf("Expected new block to match") + require.Fail(t, "Expected new block to match") } txs := make([]*types.Transaction, 10) for i := 0; i < 10; i++ { tx := types.NewTransaction(uint64(i), testEthAddrs[0], big.NewInt(10), 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, 
types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[1].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txs[i] = signedTx } @@ -1546,7 +1300,7 @@ func testUncleBlock(t *testing.T, scheme string) { errs = vm1.txPool.AddRemotesSync(txs) for i, err := range errs { if err != nil { - t.Fatalf("Failed to add transaction to VM1 at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add transaction to VM1 at index %d: %s", i, err)) } } @@ -1555,22 +1309,16 @@ func testUncleBlock(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm1BlkB, err := vm1.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if err := vm1BlkB.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1BlkB.Verify(context.Background())) - if err := vm1.SetPreference(context.Background(), vm1BlkB.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.SetPreference(context.Background(), vm1BlkB.ID())) errs = vm2.txPool.AddRemotesSync(txs[0:5]) for i, err := range errs { if err != nil { - t.Fatalf("Failed to add transaction to VM2 at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add transaction to VM2 at index %d: %s", i, err)) } } @@ -1579,27 +1327,21 @@ func testUncleBlock(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm2BlkC, err := vm2.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Failed to build BlkC on VM2: %s", err) - } + require.NoError(t, err, "Failed to build BlkC on VM2") - if err := vm2BlkC.Verify(context.Background()); err != nil { - t.Fatalf("BlkC failed verification on VM2: %s", err) - } + require.NoError(t, vm2BlkC.Verify(context.Background()), "BlkC failed verification on VM2") - if err := vm2.SetPreference(context.Background(), vm2BlkC.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm2.SetPreference(context.Background(), vm2BlkC.ID())) newHead = 
<-newTxPoolHeadChan2 if newHead.Head.Hash() != common.Hash(vm2BlkC.ID()) { - t.Fatalf("Expected new block to match") + require.Fail(t, "Expected new block to match") } errs = vm2.txPool.AddRemotesSync(txs[5:10]) for i, err := range errs { if err != nil { - t.Fatalf("Failed to add transaction to VM2 at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add transaction to VM2 at index %d: %s", i, err)) } } @@ -1608,9 +1350,7 @@ func testUncleBlock(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm2BlkD, err := vm2.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Failed to build BlkD on VM2: %s", err) - } + require.NoError(t, err, "Failed to build BlkD on VM2") // Create uncle block from blkD blkDEthBlock := vm2BlkD.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock @@ -1628,10 +1368,10 @@ func testUncleBlock(t *testing.T, scheme string) { uncleBlock, _ := wrapBlock(uncleEthBlock, tvm2.vm) if err := uncleBlock.Verify(context.Background()); !errors.Is(err, errUnclesUnsupported) { - t.Fatalf("VM2 should have failed with %q but got %q", errUnclesUnsupported, err.Error()) + require.Fail(t, fmt.Sprintf("VM2 should have failed with %q but got %q", errUnclesUnsupported, err.Error())) } if _, err := vm1.ParseBlock(context.Background(), vm2BlkC.Bytes()); err != nil { - t.Fatalf("VM1 errored parsing blkC: %s", err) + require.NoError(t, err, "VM1 errored parsing blkC") } _, err = vm1.ParseBlock(context.Background(), uncleBlock.Bytes()) require.ErrorIs(t, err, errUnclesUnsupported) @@ -1654,21 +1394,17 @@ func testEmptyBlock(t *testing.T, scheme string) { }) defer func() { - if err := tvm.vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.Shutdown(context.Background())) }() tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), 
testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) } } @@ -1677,9 +1413,7 @@ func testEmptyBlock(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) blk, err := tvm.vm.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Failed to build block with import transaction: %s", err) - } + require.NoError(t, err, "Failed to build block with import transaction") // Create empty block from blkA ethBlock := blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock @@ -1693,15 +1427,13 @@ func testEmptyBlock(t *testing.T, scheme string) { ) emptyBlock, err := wrapBlock(emptyEthBlock, tvm.vm) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if _, err := tvm.vm.ParseBlock(context.Background(), emptyBlock.Bytes()); !errors.Is(err, errEmptyBlock) { - t.Fatalf("VM should have failed with errEmptyBlock but got %s", err.Error()) + require.Fail(t, "VM should have failed with errEmptyBlock but got "+err.Error()) } if err := emptyBlock.Verify(context.Background()); !errors.Is(err, errEmptyBlock) { - t.Fatalf("block should have failed verification with errEmptyBlock but got %s", err.Error()) + require.Fail(t, "block should have failed verification with errEmptyBlock but got "+err.Error()) } } @@ -1733,13 +1465,9 @@ func testAcceptReorg(t *testing.T, scheme string) { vm2 := tvm2.vm defer func() { - if err := vm1.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.Shutdown(context.Background())) - if err := vm2.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm2.Shutdown(context.Background())) }() newTxPoolHeadChan1 := make(chan core.NewTxPoolReorgEvent, 1) @@ -1749,14 +1477,12 @@ func 
testAcceptReorg(t *testing.T, scheme string) { tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txErrors := vm1.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) } } @@ -1765,43 +1491,27 @@ func testAcceptReorg(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm1BlkA, err := vm1.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Failed to build block with import transaction: %s", err) - } + require.NoError(t, err, "Failed to build block with import transaction") - if err := vm1BlkA.Verify(context.Background()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } + require.NoError(t, vm1BlkA.Verify(context.Background()), "Block failed verification on VM1") - if err := vm1.SetPreference(context.Background(), vm1BlkA.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1.SetPreference(context.Background(), vm1BlkA.ID())) vm2BlkA, err := vm2.ParseBlock(context.Background(), vm1BlkA.Bytes()) - if err != nil { - t.Fatalf("Unexpected error parsing block from vm2: %s", err) - } - if err := vm2BlkA.Verify(context.Background()); err != nil { - t.Fatalf("Block failed verification on VM2: %s", err) - } - if err := vm2.SetPreference(context.Background(), vm2BlkA.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, err, "Unexpected error parsing block from vm2") + require.NoError(t, vm2BlkA.Verify(context.Background()), "Block failed verification on VM2") + require.NoError(t, vm2.SetPreference(context.Background(), vm2BlkA.ID())) - if err := vm1BlkA.Accept(context.Background()); err != nil { - 
t.Fatalf("VM1 failed to accept block: %s", err) - } - if err := vm2BlkA.Accept(context.Background()); err != nil { - t.Fatalf("VM2 failed to accept block: %s", err) - } + require.NoError(t, vm1BlkA.Accept(context.Background()), "VM1 failed to accept block") + require.NoError(t, vm2BlkA.Accept(context.Background()), "VM2 failed to accept block") newHead := <-newTxPoolHeadChan1 if newHead.Head.Hash() != common.Hash(vm1BlkA.ID()) { - t.Fatalf("Expected new block to match") + require.Fail(t, "Expected new block to match") } newHead = <-newTxPoolHeadChan2 if newHead.Head.Hash() != common.Hash(vm2BlkA.ID()) { - t.Fatalf("Expected new block to match") + require.Fail(t, "Expected new block to match") } // Create list of 10 successive transactions to build block A on vm1 @@ -1810,9 +1520,7 @@ func testAcceptReorg(t *testing.T, scheme string) { for i := 0; i < 10; i++ { tx := types.NewTransaction(uint64(i), testEthAddrs[0], big.NewInt(10), 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm1.chainConfig.ChainID), testKeys[1].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txs[i] = signedTx } @@ -1821,7 +1529,7 @@ func testAcceptReorg(t *testing.T, scheme string) { errs := vm1.txPool.AddRemotesSync(txs) for i, err := range errs { if err != nil { - t.Fatalf("Failed to add transaction to VM1 at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add transaction to VM1 at index %d: %s", i, err)) } } @@ -1830,22 +1538,16 @@ func testAcceptReorg(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm1BlkB, err := vm1.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - if err := vm1BlkB.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1BlkB.Verify(context.Background())) - if err := vm1.SetPreference(context.Background(), vm1BlkB.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, 
vm1.SetPreference(context.Background(), vm1BlkB.ID())) errs = vm2.txPool.AddRemotesSync(txs[0:5]) for i, err := range errs { if err != nil { - t.Fatalf("Failed to add transaction to VM2 at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add transaction to VM2 at index %d: %s", i, err)) } } @@ -1854,27 +1556,21 @@ func testAcceptReorg(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm2BlkC, err := vm2.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Failed to build BlkC on VM2: %s", err) - } + require.NoError(t, err, "Failed to build BlkC on VM2") - if err := vm2BlkC.Verify(context.Background()); err != nil { - t.Fatalf("BlkC failed verification on VM2: %s", err) - } + require.NoError(t, vm2BlkC.Verify(context.Background()), "BlkC failed verification on VM2") - if err := vm2.SetPreference(context.Background(), vm2BlkC.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm2.SetPreference(context.Background(), vm2BlkC.ID())) newHead = <-newTxPoolHeadChan2 if newHead.Head.Hash() != common.Hash(vm2BlkC.ID()) { - t.Fatalf("Expected new block to match") + require.Fail(t, "Expected new block to match") } errs = vm2.txPool.AddRemotesSync(txs[5:]) for i, err := range errs { if err != nil { - t.Fatalf("Failed to add transaction to VM2 at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add transaction to VM2 at index %d: %s", i, err)) } } @@ -1883,51 +1579,35 @@ func testAcceptReorg(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) vm2BlkD, err := vm2.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Failed to build BlkD on VM2: %s", err) - } + require.NoError(t, err, "Failed to build BlkD on VM2") // Parse blocks produced in vm2 vm1BlkC, err := vm1.ParseBlock(context.Background(), vm2BlkC.Bytes()) - if err != nil { - t.Fatalf("Unexpected error parsing block from vm2: %s", err) - } + require.NoError(t, err, "Unexpected error parsing block from vm2") vm1BlkD, 
err := vm1.ParseBlock(context.Background(), vm2BlkD.Bytes()) - if err != nil { - t.Fatalf("Unexpected error parsing block from vm2: %s", err) - } + require.NoError(t, err, "Unexpected error parsing block from vm2") - if err := vm1BlkC.Verify(context.Background()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } - if err := vm1BlkD.Verify(context.Background()); err != nil { - t.Fatalf("Block failed verification on VM1: %s", err) - } + require.NoError(t, vm1BlkC.Verify(context.Background()), "Block failed verification on VM1") + require.NoError(t, vm1BlkD.Verify(context.Background()), "Block failed verification on VM1") blkBHash := vm1BlkB.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkBHash { - t.Fatalf("expected current block to have hash %s but got %s", blkBHash.Hex(), b.Hash().Hex()) + require.Fail(t, fmt.Sprintf("expected current block to have hash %s but got %s", blkBHash.Hex(), b.Hash().Hex())) } - if err := vm1BlkC.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1BlkC.Accept(context.Background())) blkCHash := vm1BlkC.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkCHash { - t.Fatalf("expected current block to have hash %s but got %s", blkCHash.Hex(), b.Hash().Hex()) - } - if err := vm1BlkB.Reject(context.Background()); err != nil { - t.Fatal(err) + require.Fail(t, fmt.Sprintf("expected current block to have hash %s but got %s", blkCHash.Hex(), b.Hash().Hex())) } + require.NoError(t, vm1BlkB.Reject(context.Background())) - if err := vm1BlkD.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm1BlkD.Accept(context.Background())) blkDHash := vm1BlkD.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkDHash { - t.Fatalf("expected current block to have hash %s but got %s", 
blkDHash.Hex(), b.Hash().Hex()) + require.Fail(t, fmt.Sprintf("expected current block to have hash %s but got %s", blkDHash.Hex(), b.Hash().Hex())) } } @@ -1990,21 +1670,17 @@ func TestTimeSemanticVerify(t *testing.T) { }) defer func() { - if err := tvm.vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.Shutdown(context.Background())) }() tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.LatestSigner(tvm.vm.chainConfig), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) } } @@ -2013,12 +1689,10 @@ func TestTimeSemanticVerify(t *testing.T) { require.Equal(t, commonEng.PendingTxs, msg) blk, err := tvm.vm.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Failed to build block with import transaction: %s", err) - } + require.NoError(t, err, "Failed to build block with import transaction") if err := blk.Verify(context.Background()); err != nil { - t.Fatalf("Block failed verification on VM: %s", err) + require.NoError(t, err, "Block failed verification on VM") } // Create empty block from blkA @@ -2076,22 +1750,18 @@ func TestBuildTimeMilliseconds(t *testing.T) { }) defer func() { - if err := tvm.vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.Shutdown(context.Background())) }() tvm.vm.clock.Set(buildTime) tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) 
txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) } } @@ -2100,9 +1770,7 @@ func TestBuildTimeMilliseconds(t *testing.T) { require.Equal(t, commonEng.PendingTxs, msg) blk, err := tvm.vm.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Failed to build block with import transaction: %s", err) - } + require.NoError(t, err, "Failed to build block with import transaction") require.NoError(t, err) ethBlk := blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock require.Equal(t, test.expectedTimeMilliseconds, customtypes.BlockTimeMilliseconds(ethBlk)) @@ -2125,21 +1793,17 @@ func testLastAcceptedBlockNumberAllow(t *testing.T, scheme string) { }) defer func() { - if err := tvm.vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.Shutdown(context.Background())) }() tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) } } @@ -2148,17 +1812,11 @@ func testLastAcceptedBlockNumberAllow(t *testing.T, scheme string) { require.Equal(t, commonEng.PendingTxs, msg) blk, err := tvm.vm.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Failed to build block with import transaction: %s", err) - } + require.NoError(t, err, "Failed to build block with import transaction") - if err := blk.Verify(context.Background()); err != nil { - t.Fatalf("Block failed 
verification on VM: %s", err) - } + require.NoError(t, blk.Verify(context.Background()), "Block failed verification on VM") - if err := tvm.vm.SetPreference(context.Background(), blk.ID()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.SetPreference(context.Background(), blk.ID())) blkHeight := blk.Height() blkHash := blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() @@ -2167,26 +1825,22 @@ func testLastAcceptedBlockNumberAllow(t *testing.T, scheme string) { ctx := context.Background() b, err := tvm.vm.eth.APIBackend.BlockByNumber(ctx, rpc.BlockNumber(blkHeight)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if b.Hash() != blkHash { - t.Fatalf("expected block at %d to have hash %s but got %s", blkHeight, blkHash.Hex(), b.Hash().Hex()) + require.Fail(t, fmt.Sprintf("expected block at %d to have hash %s but got %s", blkHeight, blkHash.Hex(), b.Hash().Hex())) } tvm.vm.eth.APIBackend.SetAllowUnfinalizedQueries(false) _, err = tvm.vm.eth.APIBackend.BlockByNumber(ctx, rpc.BlockNumber(blkHeight)) if !errors.Is(err, eth.ErrUnfinalizedData) { - t.Fatalf("expected ErrUnfinalizedData but got %s", err.Error()) + require.Fail(t, "expected ErrUnfinalizedData but got "+err.Error()) } - if err := blk.Accept(context.Background()); err != nil { - t.Fatalf("VM failed to accept block: %s", err) - } + require.NoError(t, blk.Accept(context.Background()), "VM failed to accept block") if b := tvm.vm.blockChain.GetBlockByNumber(blkHeight); b.Hash() != blkHash { - t.Fatalf("expected block at %d to have hash %s but got %s", blkHeight, blkHash.Hex(), b.Hash().Hex()) + require.Fail(t, fmt.Sprintf("expected block at %d to have hash %s but got %s", blkHeight, blkHash.Hex(), b.Hash().Hex())) } } @@ -2200,68 +1854,56 @@ func TestBuildAllowListActivationBlock(t *testing.T) { func testBuildAllowListActivationBlock(t *testing.T, scheme string) { genesis := &core.Genesis{} - if err := genesis.UnmarshalJSON([]byte(genesisJSONSubnetEVM)); err != nil { - 
t.Fatal(err) - } + require.NoError(t, genesis.UnmarshalJSON([]byte(genesisJSONSubnetEVM))) params.GetExtra(genesis.Config).GenesisPrecompiles = extras.Precompiles{ deployerallowlist.ConfigKey: deployerallowlist.NewConfig(utils.TimeToNewUint64(time.Now()), testEthAddrs, nil, nil), } genesisJSON, err := genesis.MarshalJSON() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) tvm := newVM(t, testVMConfig{ genesisJSON: string(genesisJSON), configJSON: getConfig(scheme, ""), }) defer func() { - if err := tvm.vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.Shutdown(context.Background())) }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) tvm.vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) genesisState, err := tvm.vm.blockChain.StateAt(tvm.vm.blockChain.Genesis().Root()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) role := deployerallowlist.GetContractDeployerAllowListStatus(genesisState, testEthAddrs[0]) if role != allowlist.NoRole { - t.Fatalf("Expected allow list status to be set to no role: %s, but found: %s", allowlist.NoRole, role) + require.Fail(t, fmt.Sprintf("Expected allow list status to be set to no role: %s, but found: %s", allowlist.NoRole, role)) } // Send basic transaction to construct a simple block and confirm that the precompile state configuration in the worker behaves correctly. 
tx := types.NewTransaction(uint64(0), testEthAddrs[1], new(big.Int).Mul(firstTxAmount, big.NewInt(4)), 21000, big.NewInt(testMinGasPrice*3), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) } } blk := issueAndAccept(t, tvm.vm) newHead := <-newTxPoolHeadChan if newHead.Head.Hash() != common.Hash(blk.ID()) { - t.Fatalf("Expected new block to match") + require.Fail(t, "Expected new block to match") } // Verify that the allow list config activation was handled correctly in the first block. blkState, err := tvm.vm.blockChain.StateAt(blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Root()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) role = deployerallowlist.GetContractDeployerAllowListStatus(blkState, testEthAddrs[0]) if role != allowlist.AdminRole { - t.Fatalf("Expected allow list status to be set role %s, but found: %s", allowlist.AdminRole, role) + require.Fail(t, fmt.Sprintf("Expected allow list status to be set role %s, but found: %s", allowlist.AdminRole, role)) } } @@ -2271,9 +1913,7 @@ func TestTxAllowListSuccessfulTx(t *testing.T) { managerKey := testKeys[1] managerAddress := testEthAddrs[1] genesis := &core.Genesis{} - if err := genesis.UnmarshalJSON([]byte(toGenesisJSON(paramstest.ForkToChainConfig[upgradetest.Durango]))); err != nil { - t.Fatal(err) - } + require.NoError(t, genesis.UnmarshalJSON([]byte(toGenesisJSON(paramstest.ForkToChainConfig[upgradetest.Durango])))) // this manager role should not be activated because DurangoTimestamp is in the future params.GetExtra(genesis.Config).GenesisPrecompiles = extras.Precompiles{ txallowlist.ConfigKey: 
txallowlist.NewConfig(utils.NewUint64(0), testEthAddrs[0:1], nil, nil), @@ -2281,9 +1921,7 @@ func TestTxAllowListSuccessfulTx(t *testing.T) { durangoTime := time.Now().Add(10 * time.Hour) params.GetExtra(genesis.Config).DurangoTimestamp = utils.TimeToNewUint64(durangoTime) genesisJSON, err := genesis.MarshalJSON() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // prepare the new upgrade bytes to disable the TxAllowList disableAllowListTime := durangoTime.Add(10 * time.Hour) @@ -2310,27 +1948,23 @@ func TestTxAllowListSuccessfulTx(t *testing.T) { }) defer func() { - if err := tvm.vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.Shutdown(context.Background())) }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) tvm.vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) genesisState, err := tvm.vm.blockChain.StateAt(tvm.vm.blockChain.Genesis().Root()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Check that address 0 is whitelisted and address 1 is not role := txallowlist.GetTxAllowListStatus(genesisState, testEthAddrs[0]) if role != allowlist.AdminRole { - t.Fatalf("Expected allow list status to be set to admin: %s, but found: %s", allowlist.AdminRole, role) + require.Fail(t, fmt.Sprintf("Expected allow list status to be set to admin: %s, but found: %s", allowlist.AdminRole, role)) } role = txallowlist.GetTxAllowListStatus(genesisState, testEthAddrs[1]) if role != allowlist.NoRole { - t.Fatalf("Expected allow list status to be set to no role: %s, but found: %s", allowlist.NoRole, role) + require.Fail(t, fmt.Sprintf("Expected allow list status to be set to no role: %s, but found: %s", allowlist.NoRole, role)) } // Should not be a manager role because Durango has not activated yet role = txallowlist.GetTxAllowListStatus(genesisState, managerAddress) @@ -2343,19 +1977,17 @@ func TestTxAllowListSuccessfulTx(t *testing.T) { errs := 
tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx0}) if err := errs[0]; err != nil { - t.Fatalf("Failed to add tx at index: %s", err) + require.NoError(t, err, "Failed to add tx at index") } // Submit a rejected transaction, should throw an error tx1 := types.NewTransaction(uint64(0), testEthAddrs[1], big.NewInt(2), 21000, big.NewInt(testMinGasPrice), nil) signedTx1, err := types.SignTx(tx1, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[1].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) errs = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) if err := errs[0]; !errors.Is(err, vmerrors.ErrSenderAddressNotAllowListed) { - t.Fatalf("expected ErrSenderAddressNotAllowListed, got: %s", err) + require.NoError(t, err, "expected ErrSenderAddressNotAllowListed, got") } // Submit a rejected transaction, should throw an error because manager is not activated @@ -2376,7 +2008,7 @@ func TestTxAllowListSuccessfulTx(t *testing.T) { txs := block.Transactions() if txs.Len() != 1 { - t.Fatalf("Expected number of txs to be %d, but found %d", 1, txs.Len()) + require.Fail(t, fmt.Sprintf("Expected number of txs to be %d, but found %d", 1, txs.Len())) } require.Equal(t, signedTx0.Hash(), txs[0].Hash()) @@ -2492,17 +2124,13 @@ func TestVerifyManagerConfig(t *testing.T) { func TestTxAllowListDisablePrecompile(t *testing.T) { // Setup chain params genesis := &core.Genesis{} - if err := genesis.UnmarshalJSON([]byte(toGenesisJSON(paramstest.ForkToChainConfig[upgradetest.Latest]))); err != nil { - t.Fatal(err) - } + require.NoError(t, genesis.UnmarshalJSON([]byte(toGenesisJSON(paramstest.ForkToChainConfig[upgradetest.Latest])))) enableAllowListTimestamp := upgrade.InitiallyActiveTime // enable at initially active time params.GetExtra(genesis.Config).GenesisPrecompiles = extras.Precompiles{ txallowlist.ConfigKey: txallowlist.NewConfig(utils.TimeToNewUint64(enableAllowListTimestamp), testEthAddrs[0:1], nil, nil), } genesisJSON, err := 
genesis.MarshalJSON() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // arbitrary choice ahead of enableAllowListTimestamp disableAllowListTimestamp := enableAllowListTimestamp.Add(10 * time.Hour) @@ -2528,27 +2156,23 @@ func TestTxAllowListDisablePrecompile(t *testing.T) { tvm.vm.clock.Set(disableAllowListTimestamp) // upgrade takes effect after a block is issued, so we can set vm's clock here. defer func() { - if err := tvm.vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.Shutdown(context.Background())) }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) tvm.vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) genesisState, err := tvm.vm.blockChain.StateAt(tvm.vm.blockChain.Genesis().Root()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Check that address 0 is whitelisted and address 1 is not role := txallowlist.GetTxAllowListStatus(genesisState, testEthAddrs[0]) if role != allowlist.AdminRole { - t.Fatalf("Expected allow list status to be set to admin: %s, but found: %s", allowlist.AdminRole, role) + require.Fail(t, fmt.Sprintf("Expected allow list status to be set to admin: %s, but found: %s", allowlist.AdminRole, role)) } role = txallowlist.GetTxAllowListStatus(genesisState, testEthAddrs[1]) if role != allowlist.NoRole { - t.Fatalf("Expected allow list status to be set to no role: %s, but found: %s", allowlist.NoRole, role) + require.Fail(t, fmt.Sprintf("Expected allow list status to be set to no role: %s, but found: %s", allowlist.NoRole, role)) } // Submit a successful transaction @@ -2558,19 +2182,17 @@ func TestTxAllowListDisablePrecompile(t *testing.T) { errs := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx0}) if err := errs[0]; err != nil { - t.Fatalf("Failed to add tx at index: %s", err) + require.NoError(t, err, "Failed to add tx at index") } // Submit a rejected transaction, should throw an error tx1 := types.NewTransaction(uint64(0), 
testEthAddrs[1], big.NewInt(2), 21000, big.NewInt(testMinGasPrice), nil) signedTx1, err := types.SignTx(tx1, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[1].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) errs = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) if err := errs[0]; !errors.Is(err, vmerrors.ErrSenderAddressNotAllowListed) { - t.Fatalf("expected ErrSenderAddressNotAllowListed, got: %s", err) + require.NoError(t, err, "expected ErrSenderAddressNotAllowListed, got") } blk := issueAndAccept(t, tvm.vm) @@ -2579,7 +2201,7 @@ func TestTxAllowListDisablePrecompile(t *testing.T) { block := blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock txs := block.Transactions() if txs.Len() != 1 { - t.Fatalf("Expected number of txs to be %d, but found %d", 1, txs.Len()) + require.Fail(t, fmt.Sprintf("Expected number of txs to be %d, but found %d", 1, txs.Len())) } require.Equal(t, signedTx0.Hash(), txs[0].Hash()) @@ -2591,7 +2213,7 @@ func TestTxAllowListDisablePrecompile(t *testing.T) { // retry the rejected Tx, which should now succeed errs = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) if err := errs[0]; err != nil { - t.Fatalf("Failed to add tx at index: %s", err) + require.NoError(t, err, "Failed to add tx at index") } tvm.vm.clock.Set(tvm.vm.clock.Time().Add(2 * time.Second)) // add 2 seconds for gas fee to adjust @@ -2601,7 +2223,7 @@ func TestTxAllowListDisablePrecompile(t *testing.T) { block = blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock txs = block.Transactions() if txs.Len() != 1 { - t.Fatalf("Expected number of txs to be %d, but found %d", 1, txs.Len()) + require.Fail(t, fmt.Sprintf("Expected number of txs to be %d, but found %d", 1, txs.Len())) } require.Equal(t, signedTx1.Hash(), txs[0].Hash()) } @@ -2610,9 +2232,7 @@ func TestTxAllowListDisablePrecompile(t *testing.T) { func TestFeeManagerChangeFee(t *testing.T) { // Setup chain params genesis := &core.Genesis{} - if 
err := genesis.UnmarshalJSON([]byte(genesisJSONSubnetEVM)); err != nil { - t.Fatal(err) - } + require.NoError(t, genesis.UnmarshalJSON([]byte(genesisJSONSubnetEVM))) configExtra := params.GetExtra(genesis.Config) configExtra.GenesisPrecompiles = extras.Precompiles{ feemanager.ConfigKey: feemanager.NewConfig(utils.NewUint64(0), testEthAddrs[0:1], nil, nil, nil), @@ -2634,35 +2254,29 @@ func TestFeeManagerChangeFee(t *testing.T) { configExtra.FeeConfig = testLowFeeConfig genesisJSON, err := genesis.MarshalJSON() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) tvm := newVM(t, testVMConfig{ genesisJSON: string(genesisJSON), }) defer func() { - if err := tvm.vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.Shutdown(context.Background())) }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) tvm.vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) genesisState, err := tvm.vm.blockChain.StateAt(tvm.vm.blockChain.Genesis().Root()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Check that address 0 is whitelisted and address 1 is not role := feemanager.GetFeeManagerStatus(genesisState, testEthAddrs[0]) if role != allowlist.AdminRole { - t.Fatalf("Expected fee manager list status to be set to admin: %s, but found: %s", allowlist.AdminRole, role) + require.Fail(t, fmt.Sprintf("Expected fee manager list status to be set to admin: %s, but found: %s", allowlist.AdminRole, role)) } role = feemanager.GetFeeManagerStatus(genesisState, testEthAddrs[1]) if role != allowlist.NoRole { - t.Fatalf("Expected fee manager list status to be set to no role: %s, but found: %s", allowlist.NoRole, role) + require.Fail(t, fmt.Sprintf("Expected fee manager list status to be set to no role: %s, but found: %s", allowlist.NoRole, role)) } // Contract is initialized but no preconfig is given, reader should return genesis fee config feeConfig, lastChangedAt, err := 
tvm.vm.blockChain.GetFeeConfigAt(tvm.vm.blockChain.Genesis().Header()) @@ -2689,19 +2303,17 @@ func TestFeeManagerChangeFee(t *testing.T) { }) signedTx, err := types.SignTx(tx, types.LatestSigner(genesis.Config), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) errs := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) if err := errs[0]; err != nil { - t.Fatalf("Failed to add tx at index: %s", err) + require.NoError(t, err, "Failed to add tx at index") } blk := issueAndAccept(t, tvm.vm) newHead := <-newTxPoolHeadChan if newHead.Head.Hash() != common.Hash(blk.ID()) { - t.Fatalf("Expected new block to match") + require.Fail(t, "Expected new block to match") } block := blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock @@ -2724,9 +2336,7 @@ func TestFeeManagerChangeFee(t *testing.T) { }) signedTx2, err := types.SignTx(tx2, types.LatestSigner(genesis.Config), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) err = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx2})[0] require.ErrorIs(t, err, txpool.ErrUnderpriced) @@ -2743,14 +2353,10 @@ func TestAllowFeeRecipientDisabled(t *testing.T) { func testAllowFeeRecipientDisabled(t *testing.T, scheme string) { genesis := &core.Genesis{} - if err := genesis.UnmarshalJSON([]byte(genesisJSONSubnetEVM)); err != nil { - t.Fatal(err) - } + require.NoError(t, genesis.UnmarshalJSON([]byte(genesisJSONSubnetEVM))) params.GetExtra(genesis.Config).AllowFeeRecipients = false // set to false initially genesisJSON, err := genesis.MarshalJSON() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) tvm := newVM(t, testVMConfig{ genesisJSON: string(genesisJSON), configJSON: getConfig(scheme, ""), @@ -2758,9 +2364,7 @@ func testAllowFeeRecipientDisabled(t *testing.T, scheme string) { tvm.vm.miner.SetEtherbase(common.HexToAddress("0x0123456789")) // set non-blackhole address by force defer func() { - if err := 
tvm.vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.Shutdown(context.Background())) }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) @@ -2768,14 +2372,12 @@ func testAllowFeeRecipientDisabled(t *testing.T, scheme string) { tx := types.NewTransaction(uint64(0), testEthAddrs[1], new(big.Int).Mul(firstTxAmount, big.NewInt(4)), 21000, big.NewInt(testMinGasPrice*3), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) } } @@ -2802,9 +2404,7 @@ func testAllowFeeRecipientDisabled(t *testing.T, scheme string) { ) modifiedBlk, err := wrapBlock(modifiedBlock, tvm.vm) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) err = modifiedBlk.Verify(context.Background()) require.ErrorIs(t, err, vmerrors.ErrInvalidCoinbase) @@ -2812,31 +2412,23 @@ func testAllowFeeRecipientDisabled(t *testing.T, scheme string) { func TestAllowFeeRecipientEnabled(t *testing.T) { genesis := &core.Genesis{} - if err := genesis.UnmarshalJSON([]byte(genesisJSONSubnetEVM)); err != nil { - t.Fatal(err) - } + require.NoError(t, genesis.UnmarshalJSON([]byte(genesisJSONSubnetEVM))) params.GetExtra(genesis.Config).AllowFeeRecipients = true genesisJSON, err := genesis.MarshalJSON() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) etherBase := common.HexToAddress("0x0123456789") c := config.NewDefaultConfig() c.FeeRecipient = etherBase.String() configJSON, err := json.Marshal(c) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) tvm := newVM(t, testVMConfig{ genesisJSON: string(genesisJSON), configJSON: string(configJSON), }) defer func() { - if err 
:= tvm.vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.Shutdown(context.Background())) }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) @@ -2844,29 +2436,25 @@ func TestAllowFeeRecipientEnabled(t *testing.T) { tx := types.NewTransaction(uint64(0), testEthAddrs[1], new(big.Int).Mul(firstTxAmount, big.NewInt(4)), 21000, big.NewInt(testMinGasPrice*3), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) } } blk := issueAndAccept(t, tvm.vm) newHead := <-newTxPoolHeadChan if newHead.Head.Hash() != common.Hash(blk.ID()) { - t.Fatalf("Expected new block to match") + require.Fail(t, "Expected new block to match") } ethBlock := blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock require.Equal(t, etherBase, ethBlock.Coinbase()) // Verify that etherBase has received fees blkState, err := tvm.vm.blockChain.StateAt(ethBlock.Root()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) balance := blkState.GetBalance(etherBase) require.Equal(t, 1, balance.Cmp(common.U2560)) @@ -3162,9 +2750,7 @@ func TestSkipChainConfigCheckCompatible(t *testing.T) { }) defer func() { - if err := tvm.vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.Shutdown(context.Background())) }() newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) @@ -3174,20 +2760,18 @@ func TestSkipChainConfigCheckCompatible(t *testing.T) { tx := types.NewTransaction(uint64(0), key.Address, firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), 
testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) errs := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range errs { if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) } } blk := issueAndAccept(t, tvm.vm) newHead := <-newTxPoolHeadChan if newHead.Head.Hash() != common.Hash(blk.ID()) { - t.Fatalf("Expected new block to match") + require.Fail(t, "Expected new block to match") } reinitVM := &VM{} @@ -3270,21 +2854,17 @@ func TestParentBeaconRootBlock(t *testing.T) { }) defer func() { - if err := tvm.vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.Shutdown(context.Background())) }() tx := types.NewTransaction(uint64(0), testEthAddrs[1], firstTxAmount, 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { if err != nil { - t.Fatalf("Failed to add tx at index %d: %s", i, err) + require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) } } @@ -3293,9 +2873,7 @@ func TestParentBeaconRootBlock(t *testing.T) { require.Equal(t, commonEng.PendingTxs, msg) blk, err := tvm.vm.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Failed to build block with import transaction: %s", err) - } + require.NoError(t, err, "Failed to build block with import transaction") // Modify the block to have a parent beacon root ethBlock := blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock @@ -3304,9 +2882,7 @@ func TestParentBeaconRootBlock(t *testing.T) { parentBeaconEthBlock := ethBlock.WithSeal(header) parentBeaconBlock, err := wrapBlock(parentBeaconEthBlock, tvm.vm) - if err != nil { - t.Fatal(err) - } 
+ require.NoError(t, err) errCheck := func(err error) { if test.expectedError { @@ -3389,9 +2965,7 @@ func TestStandaloneDB(t *testing.T) { func TestFeeManagerRegressionMempoolMinFeeAfterRestart(t *testing.T) { // Setup chain params genesis := &core.Genesis{} - if err := genesis.UnmarshalJSON([]byte(genesisJSONSubnetEVM)); err != nil { - t.Fatal(err) - } + require.NoError(t, genesis.UnmarshalJSON([]byte(genesisJSONSubnetEVM))) precompileActivationTime := utils.NewUint64(genesis.Timestamp + 5) // 5 seconds after genesis configExtra := params.GetExtra(genesis.Config) configExtra.GenesisPrecompiles = extras.Precompiles{ @@ -3414,9 +2988,7 @@ func TestFeeManagerRegressionMempoolMinFeeAfterRestart(t *testing.T) { configExtra.FeeConfig = testHighFeeConfig genesisJSON, err := genesis.MarshalJSON() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) tvm := newVM(t, testVMConfig{ genesisJSON: string(genesisJSON), }) diff --git a/plugin/evm/vm_upgrade_bytes_test.go b/plugin/evm/vm_upgrade_bytes_test.go index 9529eb8ca7..dc902b7c7d 100644 --- a/plugin/evm/vm_upgrade_bytes_test.go +++ b/plugin/evm/vm_upgrade_bytes_test.go @@ -6,7 +6,6 @@ package evm import ( "context" "encoding/json" - "errors" "fmt" "math/big" "testing" @@ -48,9 +47,7 @@ func TestVMUpgradeBytesPrecompile(t *testing.T) { }, } upgradeBytesJSON, err := json.Marshal(upgradeConfig) - if err != nil { - t.Fatalf("could not marshal upgradeConfig to json: %s", err) - } + require.NoError(t, err, "could not marshal upgradeConfig to json") // initialize the VM with these upgrade bytes tvm := newVM(t, testVMConfig{ @@ -58,9 +55,7 @@ func TestVMUpgradeBytesPrecompile(t *testing.T) { upgradeJSON: string(upgradeBytesJSON), }) defer func() { - if err := tvm.vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, tvm.vm.Shutdown(context.Background())) }() tvm.vm.clock.Set(enableAllowListTimestamp) @@ -71,20 +66,14 @@ func TestVMUpgradeBytesPrecompile(t *testing.T) { require.NoError(t, 
err) errs := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx0}) - if err := errs[0]; err != nil { - t.Fatalf("Failed to add tx at index: %s", err) - } + require.NoError(t, errs[0], "Failed to add tx at index") // Submit a rejected transaction, should throw an error tx1 := types.NewTransaction(uint64(0), testEthAddrs[1], big.NewInt(2), 21000, big.NewInt(testMinGasPrice), nil) signedTx1, err := types.SignTx(tx1, types.NewEIP155Signer(tvm.vm.chainConfig.ChainID), testKeys[1].ToECDSA()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) errs = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) - if err := errs[0]; !errors.Is(err, vmerrors.ErrSenderAddressNotAllowListed) { - t.Fatalf("expected ErrSenderAddressNotAllowListed, got: %s", err) - } + require.ErrorIs(t, errs[0], vmerrors.ErrSenderAddressNotAllowListed, "expected ErrSenderAddressNotAllowListed") // prepare the new upgrade bytes to disable the TxAllowList disableAllowListTimestamp := tvm.vm.clock.Time().Add(10 * time.Hour) // arbitrary choice @@ -95,32 +84,22 @@ func TestVMUpgradeBytesPrecompile(t *testing.T) { }, ) upgradeBytesJSON, err = json.Marshal(upgradeConfig) - if err != nil { - t.Fatalf("could not marshal upgradeConfig to json: %s", err) - } + require.NoError(t, err, "could not marshal upgradeConfig to json") // Reset metrics to allow re-initialization tvm.vm.ctx.Metrics = metrics.NewPrefixGatherer() // restart the vm with the same stateful params newVM := &VM{} - if err := newVM.Initialize( + require.NoError(t, newVM.Initialize( context.Background(), tvm.vm.ctx, tvm.db, []byte(genesisJSONSubnetEVM), upgradeBytesJSON, []byte{}, []*commonEng.Fx{}, tvm.appSender, - ); err != nil { - t.Fatal(err) - } + )) defer func() { - if err := newVM.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, newVM.Shutdown(context.Background())) }() // Set the VM's state to NormalOp to initialize the tx pool. 
- if err := newVM.SetState(context.Background(), snow.Bootstrapping); err != nil { - t.Fatal(err) - } - if err := newVM.SetState(context.Background(), snow.NormalOp); err != nil { - t.Fatal(err) - } + require.NoError(t, newVM.SetState(context.Background(), snow.Bootstrapping)) + require.NoError(t, newVM.SetState(context.Background(), snow.NormalOp)) newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) newVM.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) newVM.clock.Set(disableAllowListTimestamp) @@ -128,24 +107,18 @@ func TestVMUpgradeBytesPrecompile(t *testing.T) { // Make a block, previous rules still apply (TxAllowList is active) // Submit a successful transaction errs = newVM.txPool.AddRemotesSync([]*types.Transaction{signedTx0}) - if err := errs[0]; err != nil { - t.Fatalf("Failed to add tx at index: %s", err) - } + require.NoError(t, errs[0], "Failed to add tx at index") // Submit a rejected transaction, should throw an error errs = newVM.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) - if err := errs[0]; !errors.Is(err, vmerrors.ErrSenderAddressNotAllowListed) { - t.Fatalf("expected ErrSenderAddressNotAllowListed, got: %s", err) - } + require.ErrorIs(t, errs[0], vmerrors.ErrSenderAddressNotAllowListed, "expected ErrSenderAddressNotAllowListed") blk := issueAndAccept(t, newVM) // Verify that the constructed block only has the whitelisted tx block := blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock txs := block.Transactions() - if txs.Len() != 1 { - t.Fatalf("Expected number of txs to be %d, but found %d", 1, txs.Len()) - } + require.Equal(t, 1, txs.Len(), "Expected number of txs to be %d, but found %d", 1, txs.Len()) assert.Equal(t, signedTx0.Hash(), txs[0].Hash()) // verify the issued block is after the network upgrade @@ -155,9 +128,7 @@ func TestVMUpgradeBytesPrecompile(t *testing.T) { // retry the rejected Tx, which should now succeed errs = newVM.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) - if err := errs[0]; err != 
nil { - t.Fatalf("Failed to add tx at index: %s", err) - } + require.NoError(t, errs[0], "Failed to add tx at index") newVM.clock.Set(newVM.clock.Time().Add(2 * time.Second)) // add 2 seconds for gas fee to adjust blk = issueAndAccept(t, newVM) @@ -165,9 +136,7 @@ func TestVMUpgradeBytesPrecompile(t *testing.T) { // Verify that the constructed block only has the previously rejected tx block = blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock txs = block.Transactions() - if txs.Len() != 1 { - t.Fatalf("Expected number of txs to be %d, but found %d", 1, txs.Len()) - } + require.Equal(t, 1, txs.Len(), "Expected number of txs to be %d, but found %d", 1, txs.Len()) assert.Equal(t, signedTx1.Hash(), txs[0].Hash()) } @@ -222,9 +191,7 @@ func TestNetworkUpgradesOverridden(t *testing.T) { signedTx0, err := types.SignTx(tx0, types.NewEIP155Signer(restartedVM.chainConfig.ChainID), testKeys[0].ToECDSA()) require.NoError(t, err) errs := restartedVM.txPool.AddRemotesSync([]*types.Transaction{signedTx0}) - if err := errs[0]; err != nil { - t.Fatalf("Failed to add tx at index: %s", err) - } + require.NoError(t, errs[0], "Failed to add tx at index") blk := issueAndAccept(t, restartedVM) require.NotNil(t, blk) diff --git a/precompile/allowlist/allowlisttest/test_allowlist_config.go b/precompile/allowlist/allowlisttest/test_allowlist_config.go index dc4e0e7beb..72b4e2be89 100644 --- a/precompile/allowlist/allowlisttest/test_allowlist_config.go +++ b/precompile/allowlist/allowlisttest/test_allowlist_config.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/ava-labs/libevm/common" + "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" "github.com/ava-labs/subnet-evm/precompile/allowlist" @@ -212,7 +213,7 @@ func VerifyPrecompileWithAllowListTests(t *testing.T, module modules.Module, ver // Add the contract specific tests to the map of tests to run. 
for name, test := range verifyTests { if _, exists := tests[name]; exists { - t.Fatalf("duplicate test name: %s", name) + require.Failf(t, "duplicate test name", "name: %s", name) } tests[name] = test } @@ -226,7 +227,7 @@ func EqualPrecompileWithAllowListTests(t *testing.T, module modules.Module, equa // Add the contract specific tests to the map of tests to run. for name, test := range equalTests { if _, exists := tests[name]; exists { - t.Fatalf("duplicate test name: %s", name) + require.Failf(t, "duplicate test name", "name: %s", name) } tests[name] = test } diff --git a/sync/statesync/code_syncer_test.go b/sync/statesync/code_syncer_test.go index 86d0b06106..6d22740eba 100644 --- a/sync/statesync/code_syncer_test.go +++ b/sync/statesync/code_syncer_test.go @@ -71,13 +71,12 @@ func testCodeSyncer(t *testing.T, test codeSyncerTest) { err := <-codeSyncer.Done() if test.err != nil { if err == nil { - t.Fatal(t, "expected non-nil error: %s", test.err) + require.Fail(t, "expected non-nil error", "error: %s", test.err) } assert.ErrorIs(t, err, test.err) return - } else if err != nil { - t.Fatal(err) } + require.NoError(t, err) // Assert that the client synced the code correctly. 
for i, codeHash := range codeHashes { diff --git a/sync/statesync/statesynctest/test_sync.go b/sync/statesync/statesynctest/test_sync.go index 646e6d1c0a..3594207de1 100644 --- a/sync/statesync/statesynctest/test_sync.go +++ b/sync/statesync/statesynctest/test_sync.go @@ -35,9 +35,7 @@ func AssertDBConsistency(t testing.TB, root common.Hash, clientDB ethdb.Database } numSnapshotAccounts++ } - if err := accountIt.Error(); err != nil { - t.Fatal(err) - } + require.NoError(t, accountIt.Error()) trieAccountLeaves := 0 AssertTrieConsistency(t, root, serverTrieDB, clientTrieDB, func(key, val []byte) error { @@ -94,9 +92,7 @@ func FillAccountsWithStorage(t *testing.T, r *rand.Rand, serverDB ethdb.Database newRoot, _ := FillAccounts(t, r, serverTrieDB, root, numAccounts, func(t *testing.T, _ int, account types.StateAccount) types.StateAccount { codeBytes := make([]byte, 256) _, err := r.Read(codeBytes) - if err != nil { - t.Fatalf("error reading random code bytes: %v", err) - } + require.NoError(t, err, "error reading random code bytes") codeHash := crypto.Keccak256Hash(codeBytes) rawdb.WriteCode(serverDB, codeHash, codeBytes) diff --git a/sync/statesync/sync_test.go b/sync/statesync/sync_test.go index 143d9ae268..4589c2cf58 100644 --- a/sync/statesync/sync_test.go +++ b/sync/statesync/sync_test.go @@ -100,7 +100,7 @@ func waitFor(t *testing.T, ctx context.Context, resultFunc func(context.Context) pprof.Lookup("goroutine").WriteTo(&stackBuf, 2) t.Log(stackBuf.String()) // fail the test - t.Fatal("unexpected timeout waiting for sync result") + require.Fail(t, "unexpected timeout waiting for sync result") } require.ErrorIs(t, err, expected, "result of sync did not match expected error") @@ -548,9 +548,7 @@ func TestDifferentWaitContext(t *testing.T) { MaxOutstandingCodeHashes: DefaultMaxOutstandingCodeHashes, RequestSize: 1024, }) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Create two different contexts startCtx := context.Background() // Never 
cancelled From fc68ccd1a3dba5d29e16838490bcfc1c09570232 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 11:24:46 -0500 Subject: [PATCH 03/46] fix merge --- core/blockchain_ext_test.go | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/core/blockchain_ext_test.go b/core/blockchain_ext_test.go index 628685e9e4..fd844d43e6 100644 --- a/core/blockchain_ext_test.go +++ b/core/blockchain_ext_test.go @@ -6,6 +6,8 @@ package core import ( "fmt" "math/big" + "os" + "path/filepath" "slices" "testing" @@ -139,6 +141,30 @@ func copyMemDB(db ethdb.Database) (ethdb.Database, error) { return newDB, nil } +// This copies all files from a flat directory [src] to a new temporary directory and returns +// the path to the new directory. +func copyFlatDir(t *testing.T, src string) string { + t.Helper() + if src == "" { + return "" + } + + dst := t.TempDir() + ents, err := os.ReadDir(src) + require.NoError(t, err) + + for _, e := range ents { + require.False(t, e.IsDir(), "expected flat directory") + name := e.Name() + data, err := os.ReadFile(filepath.Join(src, name)) + require.NoError(t, err) + info, err := e.Info() + require.NoError(t, err) + require.NoError(t, os.WriteFile(filepath.Join(dst, name), data, info.Mode().Perm())) + } + return dst +} + // checkBlockChainState creates a new BlockChain instance and checks that exporting each block from // genesis to last accepted from the original instance yields the same last accepted block and state // root. 
From ce76ccff5b559b4e2abb812d6d1183768316ae0b Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 12:19:12 -0500 Subject: [PATCH 04/46] use require nil instead --- params/extras/debug_test.go | 1 + params/extras/precompile_upgrade_test.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 params/extras/debug_test.go diff --git a/params/extras/debug_test.go b/params/extras/debug_test.go new file mode 100644 index 0000000000..0519ecba6e --- /dev/null +++ b/params/extras/debug_test.go @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/params/extras/precompile_upgrade_test.go b/params/extras/precompile_upgrade_test.go index 49681faa71..4256683442 100644 --- a/params/extras/precompile_upgrade_test.go +++ b/params/extras/precompile_upgrade_test.go @@ -288,7 +288,7 @@ func (tt *upgradeCompatibilityTest) run(t *testing.T, chainConfig ChainConfig) { // if this is not the final upgradeBytes, continue applying // the next upgradeBytes. (only check the result on the last apply) if i != len(tt.configs)-1 { - require.NoError(t, err, "expecting checkConfigCompatible call %d to return nil", i+1) + require.Nil(t, err, "expecting checkConfigCompatible call %d to return nil", i+1) chainConfig = newCfg continue } From 4405b43316eca803a2f3696433d14f87514d76f1 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 12:27:20 -0500 Subject: [PATCH 05/46] fix linting --- core/blockchain_ext_test.go | 13 ++++----- params/extras/debug_test.go | 1 - scripts/upstream_files.txt | 53 +++++++++++++++++++------------------ 3 files changed, 32 insertions(+), 35 deletions(-) delete mode 100644 params/extras/debug_test.go diff --git a/core/blockchain_ext_test.go b/core/blockchain_ext_test.go index fd844d43e6..6443676fc3 100644 --- a/core/blockchain_ext_test.go +++ b/core/blockchain_ext_test.go @@ -899,23 +899,20 @@ func ReorgReInsert(t *testing.T, create createFunc) { require.NoError(err) // Insert and accept first block - err = 
blockchain.InsertBlock(chain[0]) - require.NoError(err) + + require.NoError(blockchain.InsertBlock(chain[0])) require.NoError(blockchain.Accept(chain[0])) // Insert block and then set preference back (rewind) to last accepted blck - err = blockchain.InsertBlock(chain[1]) - require.NoError(err) + require.NoError(blockchain.InsertBlock(chain[1])) require.NoError(blockchain.SetPreference(chain[0])) // Re-insert and accept block - err = blockchain.InsertBlock(chain[1]) - require.NoError(err) + require.NoError(blockchain.InsertBlock(chain[1])) require.NoError(blockchain.Accept(chain[1])) // Build on top of the re-inserted block and accept - err = blockchain.InsertBlock(chain[2]) - require.NoError(err) + require.NoError(blockchain.InsertBlock(chain[2])) require.NoError(blockchain.Accept(chain[2])) blockchain.DrainAcceptorQueue() diff --git a/params/extras/debug_test.go b/params/extras/debug_test.go deleted file mode 100644 index 0519ecba6e..0000000000 --- a/params/extras/debug_test.go +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/scripts/upstream_files.txt b/scripts/upstream_files.txt index c04df0f30d..910aa26d34 100644 --- a/scripts/upstream_files.txt +++ b/scripts/upstream_files.txt @@ -1,3 +1,29 @@ +accounts/* +cmd/* +consensus/* +core/* +eth/* +ethclient/* +internal/* +log/* +miner/* +node/* +params/config.go +params/config_test.go +params/denomination.go +params/network_params.go +params/version.go +plugin/evm/customtypes/block_test.go +plugin/evm/customtypes/hashing_test.go +plugin/evm/customtypes/rlp_fuzzer_test.go +plugin/evm/customtypes/types_test.go +rpc/* +signer/* +tests/init.go +tests/rlp_test_util.go +tests/state_test_util.go +triedb/* + !accounts/abi/abi_extra_test.go !accounts/abi/bind/bind_extra.go !accounts/abi/bind/precompilebind/* @@ -26,31 +52,6 @@ !ethclient/client_interface_test.go !internal/ethapi/api_extra.go !internal/ethapi/api_extra_test.go -!plugin/evm/customtypes/* !triedb/firewood/* -accounts/* -cmd/* -consensus/* 
-core/* -eth/* -ethclient/* -internal/* -log/* -miner/* -node/* -params/config.go -params/config_test.go -params/denomination.go -params/network_params.go -params/version.go -plugin/evm/customtypes/block_test.go -plugin/evm/customtypes/hashing_test.go -plugin/evm/customtypes/rlp_fuzzer_test.go -plugin/evm/customtypes/types_test.go -rpc/* -signer/* -tests/init.go -tests/rlp_test_util.go -tests/state_test_util.go -triedb/* + From 7ed4f7b16712c778920438699df748a56719356f Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 12:39:12 -0500 Subject: [PATCH 06/46] Add set default log from coreth --- core/main_test.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/core/main_test.go b/core/main_test.go index bd41436cb1..54d505f88f 100644 --- a/core/main_test.go +++ b/core/main_test.go @@ -4,10 +4,12 @@ package core import ( + "os" "testing" "go.uber.org/goleak" + "github.com/ava-labs/libevm/log" "github.com/ava-labs/subnet-evm/params" "github.com/ava-labs/subnet-evm/plugin/evm/customtypes" ) @@ -20,6 +22,9 @@ func TestMain(m *testing.M) { customtypes.Register() params.RegisterExtras() + // Many of these tests are likely to fail due to `log.Crit` in goroutines. 
+ log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelCrit, true))) + opts := []goleak.Option{ // No good way to shut down these goroutines: goleak.IgnoreTopFunction("github.com/ava-labs/subnet-evm/core/state/snapshot.(*diskLayer).generate"), From d725b551e0008a7687b69d79c479fd7cab009a1b Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 12:44:43 -0500 Subject: [PATCH 07/46] lint --- core/main_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/main_test.go b/core/main_test.go index 54d505f88f..0ebb8f12a7 100644 --- a/core/main_test.go +++ b/core/main_test.go @@ -7,9 +7,9 @@ import ( "os" "testing" + "github.com/ava-labs/libevm/log" "go.uber.org/goleak" - "github.com/ava-labs/libevm/log" "github.com/ava-labs/subnet-evm/params" "github.com/ava-labs/subnet-evm/plugin/evm/customtypes" ) From 104c7cb0fa1e1be63850668f54899f005face1c7 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 13:01:46 -0500 Subject: [PATCH 08/46] fix merge --- core/blockchain_ext_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/blockchain_ext_test.go b/core/blockchain_ext_test.go index 6443676fc3..73a2fbfde6 100644 --- a/core/blockchain_ext_test.go +++ b/core/blockchain_ext_test.go @@ -212,9 +212,9 @@ func checkBlockChainState( // Copy the database over to prevent any issues when re-using [originalDB] after this call. 
originalDB, err = copyMemDB(originalDB) require.NoError(err) - restartedChain, err := create(originalDB, gspec, lastAcceptedBlock.Hash(), oldChainDataDir) + newChainDataDir := copyFlatDir(t, oldChainDataDir) + restartedChain, err := create(originalDB, gspec, lastAcceptedBlock.Hash(), newChainDataDir) require.NoError(err) - defer restartedChain.Stop() currentBlock := restartedChain.CurrentBlock() require.Equal(lastAcceptedBlock.Hash(), currentBlock.Hash(), "Restarted chain's current block does not match last accepted block") restartedLastAcceptedBlock := restartedChain.LastConsensusAcceptedBlock() From 94f3547087980c30f3543cc6872a0ffd54c4064f Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 16:14:10 -0500 Subject: [PATCH 09/46] get rid of a lot of require.Fail --- .../precompilebind/precompile_bind_test.go | 27 +- core/blockchain_test.go | 106 ++-- core/state_processor_ext_test.go | 6 +- plugin/evm/syncervm_test.go | 9 +- plugin/evm/vm_test.go | 468 +++++------------- .../allowlisttest/test_allowlist_config.go | 10 +- 6 files changed, 180 insertions(+), 446 deletions(-) diff --git a/accounts/abi/bind/precompilebind/precompile_bind_test.go b/accounts/abi/bind/precompilebind/precompile_bind_test.go index 867e2a726f..90967d6ba1 100644 --- a/accounts/abi/bind/precompilebind/precompile_bind_test.go +++ b/accounts/abi/bind/precompilebind/precompile_bind_test.go @@ -640,9 +640,7 @@ func TestPrecompileBind(t *testing.T) { require.ErrorContains(t, err, tt.errMsg) return } - if err != nil { - require.Fail(t, fmt.Sprintf("test %d: failed to generate binding: %v", i, err)) - } + require.NoError(t, err, "test %d: failed to generate binding: %v", i, err) precompilePath := filepath.Join(pkg, tt.name) require.NoError(t, os.MkdirAll(precompilePath, 0o700), "failed to create package") @@ -682,24 +680,23 @@ func TestPrecompileBind(t *testing.T) { moder := exec.Command(gocmd, "mod", "init", "precompilebindtest") moder.Dir = pkg - if out, err := 
moder.CombinedOutput(); err != nil { - require.Fail(t, fmt.Sprintf("failed to convert binding test to modules: %v\n%s", err, out)) - } + out, err := moder.CombinedOutput() + require.NoError(t, err, "failed to convert binding test to modules: %v\n%s", err, out) + pwd, _ := os.Getwd() replacer := exec.Command(gocmd, "mod", "edit", "-x", "-require", "github.com/ava-labs/subnet-evm@v0.0.0", "-replace", "github.com/ava-labs/subnet-evm="+filepath.Join(pwd, "..", "..", "..", "..")) // Repo root replacer.Dir = pkg - if out, err := replacer.CombinedOutput(); err != nil { - require.Fail(t, fmt.Sprintf("failed to replace binding test dependency to current source tree: %v\n%s", err, out)) - } + out, err = replacer.CombinedOutput() + require.NoError(t, err, "failed to replace binding test dependency to current source tree: %v\n%s", err, out) + tidier := exec.Command(gocmd, "mod", "tidy", "-compat=1.24") tidier.Dir = pkg - if out, err := tidier.CombinedOutput(); err != nil { - require.Fail(t, fmt.Sprintf("failed to tidy Go module file: %v\n%s", err, out)) - } + out, err = tidier.CombinedOutput() + require.NoError(t, err, "failed to tidy Go module file: %v\n%s", err, out) + // Test the entire package and report any failures cmd := exec.Command(gocmd, "test", "./...", "-v", "-count", "1") cmd.Dir = pkg - if out, err := cmd.CombinedOutput(); err != nil { - require.Fail(t, fmt.Sprintf("failed to run binding test: %v\n%s", err, out)) - } + out, err = cmd.CombinedOutput() + require.NoError(t, err, "failed to run binding test: %v\n%s", err, out) } diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 7b994976cb..ecb47cd0d5 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -387,9 +387,7 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { // Confirm that the node does not have the state for intermediate nodes (exclude the last accepted block) for _, block := range chain[:len(chain)-1] { - if blockchain.HasState(block.Root()) { - 
require.Fail(t, fmt.Sprintf("Expected blockchain to be missing state for intermediate block %d with pruning enabled", block.NumberU64())) - } + require.False(t, blockchain.HasState(block.Root()), "Expected blockchain to be missing state for intermediate block %d with pruning enabled", block.NumberU64()) } blockchain.Stop() @@ -411,15 +409,11 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { gspec, lastAcceptedHash, ) - if err != nil { - require.NoError(t, err) - } + require.NoError(t, err, "testRepopulateMissingTriesParallel: failed to create blockchain") defer blockchain.Stop() for _, block := range chain { - if !blockchain.HasState(block.Root()) { - require.Fail(t, fmt.Sprintf("failed to re-generate state for block %d", block.NumberU64())) - } + require.True(t, blockchain.HasState(block.Root()), "Expected blockchain to have state for block %d", block.NumberU64()) } } @@ -547,27 +541,18 @@ func testCanonicalHashMarker(t *testing.T, scheme string) { cacheConfig := DefaultCacheConfigWithScheme(scheme) cacheConfig.ChainDataDir = t.TempDir() chain, err := NewBlockChain(db, cacheConfig, gspec, engine, vm.Config{}, common.Hash{}, false) - if err != nil { - require.Fail(t, fmt.Sprintf("failed to create tester chain: %v", err)) - } + require.NoError(t, err, "failed to create tester chain: %v", err) + // Insert forkA and forkB, the canonical should on forkA still - if n, err := chain.InsertChain(forkA); err != nil { - require.Fail(t, fmt.Sprintf("block %d: failed to insert into chain: %v", n, err)) - } - if n, err := chain.InsertChain(forkB); err != nil { - require.Fail(t, fmt.Sprintf("block %d: failed to insert into chain: %v", n, err)) - } + n, err := chain.InsertChain(forkA) + require.NoError(t, err, "block %d: failed to insert into chain: %v", n, err) + n, err = chain.InsertChain(forkB) + require.NoError(t, err, "block %d: failed to insert into chain: %v", n, err) verify := func(head *types.Block) { - if chain.CurrentBlock().Hash() != 
head.Hash() { - require.Fail(t, fmt.Sprintf("Unexpected block hash, want %x, got %x", head.Hash(), chain.CurrentBlock().Hash())) - } - if chain.CurrentHeader().Hash() != head.Hash() { - require.Fail(t, fmt.Sprintf("Unexpected head header, want %x, got %x", head.Hash(), chain.CurrentHeader().Hash())) - } - if !chain.HasState(head.Root()) { - require.Fail(t, fmt.Sprintf("Lost block state %v %x", head.Number(), head.Hash())) - } + require.Equal(t, head.Hash(), chain.CurrentBlock().Hash(), "Unexpected block hash, want %x, got %x", head.Hash(), chain.CurrentBlock().Hash()) + require.Equal(t, head.Hash(), chain.CurrentHeader().Hash(), "Unexpected head header, want %x, got %x", head.Hash(), chain.CurrentHeader().Hash()) + require.True(t, chain.HasState(head.Root()), "Lost block state %v %x", head.Number(), head.Hash()) } // Switch canonical chain to forkB if necessary @@ -583,16 +568,12 @@ func testCanonicalHashMarker(t *testing.T, scheme string) { for i := 0; i < len(forkB); i++ { block := forkB[i] hash := chain.GetCanonicalHash(block.NumberU64()) - if hash != block.Hash() { - require.Fail(t, fmt.Sprintf("Unexpected canonical hash %d", block.NumberU64())) - } + require.Equal(t, block.Hash(), hash, "Unexpected canonical hash %d", block.NumberU64()) } if c.forkA > c.forkB { for i := uint64(c.forkB) + 1; i <= uint64(c.forkA); i++ { hash := chain.GetCanonicalHash(i) - if hash != (common.Hash{}) { - require.Fail(t, fmt.Sprintf("Unexpected canonical hash %d", i)) - } + require.Zero(t, hash, "Unexpected canonical hash %d", i) } } chain.Stop() @@ -733,15 +714,12 @@ func testCreateThenDelete(t *testing.T, config *params.ChainConfig) { // Debug: true, // Tracer: logger.NewJSONLogger(nil, os.Stdout), }, common.Hash{}, false) - if err != nil { - require.Fail(t, fmt.Sprintf("failed to create tester chain: %v", err)) - } + require.NoError(t, err, "failed to create tester chain: %v", err) defer chain.Stop() // Import the blocks for _, block := range blocks { - if _, err := 
chain.InsertChain([]*types.Block{block}); err != nil { - require.Fail(t, fmt.Sprintf("block %d: failed to insert into chain: %v", block.NumberU64(), err)) - } + _, err = chain.InsertChain([]*types.Block{block}) + require.NoError(t, err, "block %d: failed to insert into chain: %v", block.NumberU64(), err) } } @@ -843,19 +821,15 @@ func TestDeleteThenCreate(t *testing.T) { nonce++ } }) - if err != nil { - require.NoError(t, err) - } + require.NoError(t, err, "failed to generate chain: %v", err) + // Import the canonical chain chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfig, gspec, engine, vm.Config{}, common.Hash{}, false) - if err != nil { - require.Fail(t, fmt.Sprintf("failed to create tester chain: %v", err)) - } + require.NoError(t, err, "failed to create tester chain: %v", err) defer chain.Stop() for _, block := range blocks { - if _, err := chain.InsertChain([]*types.Block{block}); err != nil { - require.Fail(t, fmt.Sprintf("block %d: failed to insert into chain: %v", block.NumberU64(), err)) - } + _, err = chain.InsertChain([]*types.Block{block}) + require.NoError(t, err, "block %d: failed to insert into chain: %v", block.NumberU64(), err) } } @@ -935,22 +909,17 @@ func TestTransientStorageReset(t *testing.T) { // Initialize the blockchain with 1153 enabled. 
chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfig, gspec, engine, vmConfig, common.Hash{}, false) - if err != nil { - require.Fail(t, fmt.Sprintf("failed to create tester chain: %v", err)) - } + require.NoError(t, err, "failed to create tester chain: %v", err) defer chain.Stop() // Import the blocks - if _, err := chain.InsertChain(blocks); err != nil { - require.Fail(t, fmt.Sprintf("failed to insert into chain: %v", err)) - } + _, err = chain.InsertChain(blocks) + require.NoError(t, err, "failed to insert into chain: %v", err) // Check the storage state, err := chain.StateAt(chain.CurrentHeader().Root) - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to load state %v", err)) - } + require.NoError(t, err, "failed to load state: %v", err) loc := common.BytesToHash([]byte{1}) slot := state.GetState(destAddress, loc) - require.Equal(t, common.Hash{}, slot, "Unexpected dirty storage slot") + require.Zero(t, slot, "Unexpected dirty storage slot") } func TestEIP3651(t *testing.T) { @@ -1023,22 +992,17 @@ func TestEIP3651(t *testing.T) { b.AddTx(tx) }) chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfig, gspec, engine, vm.Config{Tracer: logger.NewMarkdownLogger(&logger.Config{}, os.Stderr)}, common.Hash{}, false) - if err != nil { - require.Fail(t, fmt.Sprintf("failed to create tester chain: %v", err)) - } + require.NoError(t, err, "failed to create tester chain: %v", err) defer chain.Stop() - if n, err := chain.InsertChain(blocks); err != nil { - require.Fail(t, fmt.Sprintf("block %d: failed to insert into chain: %v", n, err)) - } + _, err = chain.InsertChain(blocks) + require.NoError(t, err, "failed to insert into chain: %v", err) block := chain.GetBlockByNumber(1) // 1+2: Ensure EIP-1559 access lists are accounted for via gas usage. 
innerGas := vm.GasQuickStep*2 + ethparams.ColdSloadCostEIP2929*2 expectedGas := ethparams.TxGas + 5*vm.GasFastestStep + vm.GasQuickStep + 100 + innerGas // 100 because 0xaaaa is in access list - if block.GasUsed() != expectedGas { - require.Fail(t, fmt.Sprintf("incorrect amount of gas spent: expected %d, got %d", expectedGas, block.GasUsed())) - } + require.Equal(t, expectedGas, block.GasUsed(), "incorrect amount of gas spent: expected %d, got %d", expectedGas, block.GasUsed()) state, _ := chain.State() @@ -1050,15 +1014,11 @@ func TestEIP3651(t *testing.T) { tx := block.Transactions()[0] gasPrice := new(big.Int).Add(block.BaseFee(), tx.EffectiveGasTipValue(block.BaseFee())) expected := new(big.Int).SetUint64(block.GasUsed() * gasPrice.Uint64()) - if actual.Cmp(expected) != 0 { - require.Fail(t, fmt.Sprintf("miner balance incorrect: expected %d, got %d", expected, actual)) - } + require.Equal(t, expected, actual, "miner balance incorrect: expected %d, got %d", expected, actual) // 4: Ensure the tx sender paid for the gasUsed * (block baseFee + effectiveGasTip). // Note this differs from go-ethereum where the miner receives the gasUsed * block baseFee, // as our handling of the coinbase payment is different. 
actual = new(big.Int).Sub(funds, state.GetBalance(addr1).ToBig()) - if actual.Cmp(expected) != 0 { - require.Fail(t, fmt.Sprintf("sender balance incorrect: expected %d, got %d", expected, actual)) - } + require.Equal(t, expected, actual, "sender balance incorrect: expected %d, got %d", expected, actual) } diff --git a/core/state_processor_ext_test.go b/core/state_processor_ext_test.go index 632a3313a7..4b9db6e62b 100644 --- a/core/state_processor_ext_test.go +++ b/core/state_processor_ext_test.go @@ -4,7 +4,6 @@ package core import ( - "fmt" "math/big" "testing" @@ -100,9 +99,6 @@ func TestBadTxAllowListBlock(t *testing.T) { } { block := GenerateBadBlock(gspec.ToBlock(), dummy.NewCoinbaseFaker(), tt.txs, gspec.Config) _, err := blockchain.InsertChain(types.Blocks{block}) - require.Error(t, err, "block imported without errors") - if have, want := err.Error(), tt.want; have != want { - require.Fail(t, fmt.Sprintf("test %d:\nhave \"%v\"\nwant \"%v\"\n", i, have, want)) - } + require.EqualError(t, err, tt.want, "test %d: unexpected error message", i) } } diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index 688f45b1df..a9952b36fc 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -152,9 +152,8 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { require.NoError(t, syncDisabledVM.Shutdown(context.Background())) }() - if height := syncDisabledVM.LastAcceptedBlockInternal().Height(); height != 0 { - require.Fail(t, fmt.Sprintf("Unexpected last accepted height: %d", height)) - } + height := syncDisabledVM.LastAcceptedBlockInternal().Height() + require.Zero(t, height, "Unexpected last accepted height: %d", height) enabled, err := syncDisabledVM.StateSyncEnabled(context.Background()) require.NoError(t, err) @@ -163,9 +162,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { // Process the first 10 blocks from the serverVM for i := uint64(1); i < 10; i++ { ethBlock = 
vmSetup.serverVM.blockChain.GetBlockByNumber(i) - if ethBlock == nil { - require.Fail(t, fmt.Sprintf("VM Server did not have a block available at height %d", i)) - } + require.NotNil(t, ethBlock, "VM Server did not have a block available at height %d", i) b, err := rlp.EncodeToBytes(ethBlock) require.NoError(t, err) blk, err := syncDisabledVM.ParseBlock(context.Background(), b) diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 313df6a209..7bc06cc862 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -7,7 +7,6 @@ import ( "context" "crypto/ecdsa" "encoding/json" - "errors" "fmt" "math/big" "os" @@ -384,16 +383,12 @@ func testBuildEthTxBlock(t *testing.T, scheme string) { require.NoError(t, err) errs := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range errs { - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } blk1 := issueAndAccept(t, tvm.vm) newHead := <-newTxPoolHeadChan - if newHead.Head.Hash() != common.Hash(blk1.ID()) { - require.Fail(t, "Expected new block to match") - } + require.Equal(t, common.Hash(blk1.ID()), newHead.Head.Hash(), "Expected new block to match") txs := make([]*types.Transaction, 10) for i := 0; i < 10; i++ { @@ -404,28 +399,21 @@ func testBuildEthTxBlock(t *testing.T, scheme string) { } errs = tvm.vm.txPool.AddRemotesSync(txs) for i, err := range errs { - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } tvm.vm.clock.Set(tvm.vm.clock.Time().Add(2 * time.Second)) blk2 := issueAndAccept(t, tvm.vm) newHead = <-newTxPoolHeadChan - if newHead.Head.Hash() != common.Hash(blk2.ID()) { - require.Fail(t, "Expected new block to match") - } + require.Equal(t, common.Hash(blk2.ID()), newHead.Head.Hash(), "Expected new block to match") lastAcceptedID, err := 
tvm.vm.LastAccepted(context.Background()) require.NoError(t, err) - if lastAcceptedID != blk2.ID() { - require.Fail(t, fmt.Sprintf("Expected last accepted blockID to be the accepted block: %s, but found %s", blk2.ID(), lastAcceptedID)) - } + require.Equal(t, blk2.ID(), lastAcceptedID, "Expected last accepted blockID to be the accepted block: %s, but found %s", blk2.ID(), lastAcceptedID) ethBlk1 := blk1.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock - if ethBlk1Root := ethBlk1.Root(); !tvm.vm.blockChain.HasState(ethBlk1Root) { - require.Fail(t, "Expected blk1 state root to not yet be pruned after blk2 was accepted because of tip buffer") - } + ethBlk1Root := ethBlk1.Root() + require.True(t, tvm.vm.blockChain.HasState(ethBlk1Root), "Expected blk1 state root to not yet be pruned after blk2 was accepted because of tip buffer") // Clear the cache and ensure that GetBlock returns internal blocks with the correct status tvm.vm.State.Flush() @@ -436,9 +424,7 @@ func testBuildEthTxBlock(t *testing.T, scheme string) { blk1Refreshed, err := tvm.vm.GetBlockInternal(context.Background(), blk1RefreshedID) require.NoError(t, err) - if blk1Refreshed.ID() != blk1.ID() { - require.Fail(t, "Found unexpected blkID for parent of blk2") - } + require.Equal(t, blk1.ID(), blk1Refreshed.ID(), "Found unexpected blkID for parent of blk2") // Close the vm and all databases require.NoError(t, tvm.vm.Shutdown(context.Background())) @@ -460,15 +446,10 @@ func testBuildEthTxBlock(t *testing.T, scheme string) { )) // State root should not have been committed and discarded on restart - if ethBlk1Root := ethBlk1.Root(); restartedVM.blockChain.HasState(ethBlk1Root) { - require.Fail(t, "Expected blk1 state root to be pruned after blk2 was accepted on top of it in pruning mode") - } + require.False(t, restartedVM.blockChain.HasState(ethBlk1Root), "Expected blk1 state root to be pruned after blk2 was accepted on top of it in pruning mode") // State root should be committed when accepted tip on 
shutdown - ethBlk2 := blk2.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock - if ethBlk2Root := ethBlk2.Root(); !restartedVM.blockChain.HasState(ethBlk2Root) { - require.Fail(t, "Expected blk2 state root to not be pruned after shutdown (last accepted tip should be committed)") - } + require.True(t, restartedVM.blockChain.HasState(blk2.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Root()), "Expected blk2 state root to not be pruned after shutdown (last accepted tip should be committed)") // Shutdown the newest VM require.NoError(t, restartedVM.Shutdown(context.Background())) @@ -522,9 +503,7 @@ func testSetPreferenceRace(t *testing.T, scheme string) { txErrors := vm1.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } msg, err := vm1.WaitForEvent(context.Background()) @@ -547,13 +526,9 @@ func testSetPreferenceRace(t *testing.T, scheme string) { require.NoError(t, vm2BlkA.Accept(context.Background()), "VM2 failed to accept block") newHead := <-newTxPoolHeadChan1 - if newHead.Head.Hash() != common.Hash(vm1BlkA.ID()) { - require.Fail(t, "Expected new block to match") - } + require.Equal(t, common.Hash(vm1BlkA.ID()), newHead.Head.Hash(), "Expected new block to match") newHead = <-newTxPoolHeadChan2 - if newHead.Head.Hash() != common.Hash(vm2BlkA.ID()) { - require.Fail(t, "Expected new block to match") - } + require.Equal(t, common.Hash(vm2BlkA.ID()), newHead.Head.Hash(), "Expected new block to match") // Create list of 10 successive transactions to build block A on vm1 // and to be split into two separate blocks on VM2 @@ -570,9 +545,7 @@ func testSetPreferenceRace(t *testing.T, scheme string) { // Add the remote transactions, build the block, and set VM1's preference for block A errs = vm1.txPool.AddRemotesSync(txs) for i, err := range errs { - if err != nil 
{ - require.Fail(t, fmt.Sprintf("Failed to add transaction to VM1 at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add transaction to VM1 at index %d: %s", i, err) } msg, err = vm1.WaitForEvent(context.Background()) @@ -591,9 +564,7 @@ func testSetPreferenceRace(t *testing.T, scheme string) { // Block C errs = vm2.txPool.AddRemotesSync(txs[0:5]) for i, err := range errs { - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to add transaction to VM2 at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add transaction to VM2 at index %d: %s", i, err) } msg, err = vm2.WaitForEvent(context.Background()) @@ -608,16 +579,12 @@ func testSetPreferenceRace(t *testing.T, scheme string) { require.NoError(t, vm2.SetPreference(context.Background(), vm2BlkC.ID())) newHead = <-newTxPoolHeadChan2 - if newHead.Head.Hash() != common.Hash(vm2BlkC.ID()) { - require.Fail(t, "Expected new block to match") - } + require.Equal(t, common.Hash(vm2BlkC.ID()), newHead.Head.Hash(), "Expected new block to match") // Block D errs = vm2.txPool.AddRemotesSync(txs[5:10]) for i, err := range errs { - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to add transaction to VM2 at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add transaction to VM2 at index %d: %s", i, err) } msg, err = vm2.WaitForEvent(context.Background()) @@ -714,9 +681,7 @@ func testReorgProtection(t *testing.T, scheme string) { txErrors := vm1.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } msg, err := vm1.WaitForEvent(context.Background()) @@ -739,13 +704,9 @@ func testReorgProtection(t *testing.T, scheme string) { require.NoError(t, vm2BlkA.Accept(context.Background()), "VM2 failed to accept block") newHead := <-newTxPoolHeadChan1 - if newHead.Head.Hash() != 
common.Hash(vm1BlkA.ID()) { - require.Fail(t, "Expected new block to match") - } + require.Equal(t, common.Hash(vm1BlkA.ID()), newHead.Head.Hash(), "Expected new block to match") newHead = <-newTxPoolHeadChan2 - if newHead.Head.Hash() != common.Hash(vm2BlkA.ID()) { - require.Fail(t, "Expected new block to match") - } + require.Equal(t, common.Hash(vm2BlkA.ID()), newHead.Head.Hash(), "Expected new block to match") // Create list of 10 successive transactions to build block A on vm1 // and to be split into two separate blocks on VM2 @@ -762,9 +723,7 @@ func testReorgProtection(t *testing.T, scheme string) { // Add the remote transactions, build the block, and set VM1's preference for block A errs = vm1.txPool.AddRemotesSync(txs) for i, err := range errs { - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to add transaction to VM1 at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add transaction to VM1 at index %d: %s", i, err) } msg, err = vm1.WaitForEvent(context.Background()) @@ -783,9 +742,7 @@ func testReorgProtection(t *testing.T, scheme string) { // Block C errs = vm2.txPool.AddRemotesSync(txs[0:5]) for i, err := range errs { - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to add transaction to VM2 at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add transaction to VM2 at index %d: %s", i, err) } msg, err = vm2.WaitForEvent(context.Background()) @@ -860,9 +817,7 @@ func testNonCanonicalAccept(t *testing.T, scheme string) { txErrors := vm1.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } msg, err := vm1.WaitForEvent(context.Background()) @@ -874,41 +829,32 @@ func testNonCanonicalAccept(t *testing.T, scheme string) { require.NoError(t, vm1BlkA.Verify(context.Background()), "Block failed verification on VM1") - 
if _, err := vm1.GetBlockIDAtHeight(context.Background(), vm1BlkA.Height()); err != database.ErrNotFound { - require.Fail(t, fmt.Sprintf("Expected unaccepted block not to be indexed by height, but found %s", err)) - } + _, err = vm1.GetBlockIDAtHeight(context.Background(), vm1BlkA.Height()) + require.ErrorIs(t, err, database.ErrNotFound, "Expected unaccepted block not to be indexed by height, but found %s", err) require.NoError(t, vm1.SetPreference(context.Background(), vm1BlkA.ID())) vm2BlkA, err := vm2.ParseBlock(context.Background(), vm1BlkA.Bytes()) require.NoError(t, err, "Unexpected error parsing block from vm2") require.NoError(t, vm2BlkA.Verify(context.Background()), "Block failed verification on VM2") - if _, err := vm2.GetBlockIDAtHeight(context.Background(), vm2BlkA.Height()); err != database.ErrNotFound { - require.Fail(t, fmt.Sprintf("Expected unaccepted block not to be indexed by height, but found %s", err)) - } + _, err = vm2.GetBlockIDAtHeight(context.Background(), vm2BlkA.Height()) + require.ErrorIs(t, err, database.ErrNotFound, "Expected unaccepted block not to be indexed by height, but found %s", err) require.NoError(t, vm2.SetPreference(context.Background(), vm2BlkA.ID())) require.NoError(t, vm1BlkA.Accept(context.Background()), "VM1 failed to accept block") - if blkID, err := vm1.GetBlockIDAtHeight(context.Background(), vm1BlkA.Height()); err != nil { - require.NoError(t, err, "Height lookuped failed on accepted block") - } else if blkID != vm1BlkA.ID() { - require.Fail(t, fmt.Sprintf("Expected accepted block to be indexed by height, but found %s", blkID)) - } + blkID, err := vm1.GetBlockIDAtHeight(context.Background(), vm1BlkA.Height()) + require.NoError(t, err, "Height lookuped failed on accepted block") + require.Equal(t, vm1BlkA.ID(), blkID, "Expected accepted block to be indexed by height, but found %s", blkID) + require.NoError(t, vm2BlkA.Accept(context.Background()), "VM2 failed to accept block") - if blkID, err := 
vm2.GetBlockIDAtHeight(context.Background(), vm2BlkA.Height()); err != nil { - require.NoError(t, err, "Height lookuped failed on accepted block") - } else if blkID != vm2BlkA.ID() { - require.Fail(t, fmt.Sprintf("Expected accepted block to be indexed by height, but found %s", blkID)) - } + blkID, err = vm2.GetBlockIDAtHeight(context.Background(), vm2BlkA.Height()) + require.NoError(t, err, "Height lookuped failed on accepted block") + require.Equal(t, vm2BlkA.ID(), blkID, "Expected accepted block to be indexed by height, but found %s", blkID) newHead := <-newTxPoolHeadChan1 - if newHead.Head.Hash() != common.Hash(vm1BlkA.ID()) { - require.Fail(t, "Expected new block to match") - } + require.Equal(t, common.Hash(vm1BlkA.ID()), newHead.Head.Hash(), "Expected new block to match") newHead = <-newTxPoolHeadChan2 - if newHead.Head.Hash() != common.Hash(vm2BlkA.ID()) { - require.Fail(t, "Expected new block to match") - } + require.Equal(t, common.Hash(vm2BlkA.ID()), newHead.Head.Hash(), "Expected new block to match") // Create list of 10 successive transactions to build block A on vm1 // and to be split into two separate blocks on VM2 @@ -925,9 +871,7 @@ func testNonCanonicalAccept(t *testing.T, scheme string) { // Add the remote transactions, build the block, and set VM1's preference for block A errs = vm1.txPool.AddRemotesSync(txs) for i, err := range errs { - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to add transaction to VM1 at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add transaction to VM1 at index %d: %s", i, err) } msg, err = vm1.WaitForEvent(context.Background()) @@ -939,23 +883,18 @@ func testNonCanonicalAccept(t *testing.T, scheme string) { require.NoError(t, vm1BlkB.Verify(context.Background())) - if _, err := vm1.GetBlockIDAtHeight(context.Background(), vm1BlkB.Height()); err != database.ErrNotFound { - require.Fail(t, fmt.Sprintf("Expected unaccepted block not to be indexed by height, but found %s", err)) - } + _, err = 
vm1.GetBlockIDAtHeight(context.Background(), vm1BlkB.Height()) + require.ErrorIs(t, err, database.ErrNotFound, "Expected unaccepted block not to be indexed by height, but found %s", err) require.NoError(t, vm1.SetPreference(context.Background(), vm1BlkB.ID())) blkBHeight := vm1BlkB.Height() blkBHash := vm1BlkB.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() - if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkBHash { - require.Fail(t, fmt.Sprintf("expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), b.Hash().Hex())) - } + require.Equal(t, blkBHash, vm1.blockChain.GetBlockByNumber(blkBHeight).Hash(), "expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), vm1.blockChain.GetBlockByNumber(blkBHeight).Hash().Hex()) errs = vm2.txPool.AddRemotesSync(txs[0:5]) for i, err := range errs { - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to add transaction to VM2 at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add transaction to VM2 at index %d: %s", i, err) } msg, err = vm2.WaitForEvent(context.Background()) @@ -970,22 +909,17 @@ func testNonCanonicalAccept(t *testing.T, scheme string) { require.NoError(t, vm1BlkC.Verify(context.Background()), "Block failed verification on VM1") - if _, err := vm1.GetBlockIDAtHeight(context.Background(), vm1BlkC.Height()); err != database.ErrNotFound { - require.Fail(t, fmt.Sprintf("Expected unaccepted block not to be indexed by height, but found %s", err)) - } + _, err = vm1.GetBlockIDAtHeight(context.Background(), vm1BlkC.Height()) + require.ErrorIs(t, err, database.ErrNotFound, "Expected unaccepted block not to be indexed by height, but found %s", err) require.NoError(t, vm1BlkC.Accept(context.Background()), "VM1 failed to accept block") - if blkID, err := vm1.GetBlockIDAtHeight(context.Background(), vm1BlkC.Height()); err != nil { - require.NoError(t, err, "Height lookuped failed on accepted block") - } else if blkID != vm1BlkC.ID() { 
- require.Fail(t, fmt.Sprintf("Expected accepted block to be indexed by height, but found %s", blkID)) - } + blkID, err = vm1.GetBlockIDAtHeight(context.Background(), vm1BlkC.Height()) + require.NoError(t, err, "Height lookuped failed on accepted block") + require.Equal(t, vm1BlkC.ID(), blkID, "Expected accepted block to be indexed by height, but found %s", blkID) blkCHash := vm1BlkC.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() - if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkCHash { - require.Fail(t, fmt.Sprintf("expected block at %d to have hash %s but got %s", blkBHeight, blkCHash.Hex(), b.Hash().Hex())) - } + require.Equal(t, blkCHash, vm1.blockChain.GetBlockByNumber(blkBHeight).Hash(), "expected block at %d to have hash %s but got %s", blkBHeight, blkCHash.Hex(), vm1.blockChain.GetBlockByNumber(blkBHeight).Hash().Hex()) } // Regression test to ensure that a VM that verifies block B, C, then @@ -1033,9 +967,7 @@ func testStickyPreference(t *testing.T, scheme string) { txErrors := vm1.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } msg, err := vm1.WaitForEvent(context.Background()) @@ -1058,13 +990,9 @@ func testStickyPreference(t *testing.T, scheme string) { require.NoError(t, vm2BlkA.Accept(context.Background()), "VM2 failed to accept block") newHead := <-newTxPoolHeadChan1 - if newHead.Head.Hash() != common.Hash(vm1BlkA.ID()) { - require.Fail(t, "Expected new block to match") - } + require.Equal(t, common.Hash(vm1BlkA.ID()), newHead.Head.Hash(), "Expected new block to match") newHead = <-newTxPoolHeadChan2 - if newHead.Head.Hash() != common.Hash(vm2BlkA.ID()) { - require.Fail(t, "Expected new block to match") - } + require.Equal(t, common.Hash(vm2BlkA.ID()), newHead.Head.Hash(), "Expected new block to match") // Create 
list of 10 successive transactions to build block A on vm1 // and to be split into two separate blocks on VM2 @@ -1081,9 +1009,7 @@ func testStickyPreference(t *testing.T, scheme string) { // Add the remote transactions, build the block, and set VM1's preference for block A errs = vm1.txPool.AddRemotesSync(txs) for i, err := range errs { - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to add transaction to VM1 at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add transaction to VM1 at index %d: %s", i, err) } msg, err = vm1.WaitForEvent(context.Background()) @@ -1099,15 +1025,11 @@ func testStickyPreference(t *testing.T, scheme string) { blkBHeight := vm1BlkB.Height() blkBHash := vm1BlkB.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() - if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkBHash { - require.Fail(t, fmt.Sprintf("expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), b.Hash().Hex())) - } + require.Equal(t, blkBHash, vm1.blockChain.GetBlockByNumber(blkBHeight).Hash(), "expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), vm1.blockChain.GetBlockByNumber(blkBHeight).Hash().Hex()) errs = vm2.txPool.AddRemotesSync(txs[0:5]) for i, err := range errs { - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to add transaction to VM2 at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add transaction to VM2 at index %d: %s", i, err) } msg, err = vm2.WaitForEvent(context.Background()) @@ -1122,15 +1044,11 @@ func testStickyPreference(t *testing.T, scheme string) { require.NoError(t, vm2.SetPreference(context.Background(), vm2BlkC.ID())) newHead = <-newTxPoolHeadChan2 - if newHead.Head.Hash() != common.Hash(vm2BlkC.ID()) { - require.Fail(t, "Expected new block to match") - } + require.Equal(t, common.Hash(vm2BlkC.ID()), newHead.Head.Hash(), "Expected new block to match") errs = vm2.txPool.AddRemotesSync(txs[5:]) for i, err := range errs { - if 
err != nil { - require.Fail(t, fmt.Sprintf("Failed to add transaction to VM2 at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add transaction to VM2 at index %d: %s", i, err) } msg, err = vm2.WaitForEvent(context.Background()) @@ -1153,61 +1071,35 @@ func testStickyPreference(t *testing.T, scheme string) { // Should be no-ops require.NoError(t, vm1BlkC.Verify(context.Background()), "Block failed verification on VM1") require.NoError(t, vm1BlkD.Verify(context.Background()), "Block failed verification on VM1") - if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkBHash { - require.Fail(t, fmt.Sprintf("expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), b.Hash().Hex())) - } - if b := vm1.blockChain.GetBlockByNumber(blkDHeight); b != nil { - require.Fail(t, fmt.Sprintf("expected block at %d to be nil but got %s", blkDHeight, b.Hash().Hex())) - } - if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkBHash { - require.Fail(t, fmt.Sprintf("expected current block to have hash %s but got %s", blkBHash.Hex(), b.Hash().Hex())) - } + require.Equal(t, blkBHash, vm1.blockChain.GetBlockByNumber(blkBHeight).Hash(), "expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), vm1.blockChain.GetBlockByNumber(blkBHeight).Hash().Hex()) + require.Nil(t, vm1.blockChain.GetBlockByNumber(blkDHeight), "expected block at %d to be nil", blkDHeight) + require.Equal(t, blkBHash, vm1.blockChain.CurrentBlock().Hash(), "expected current block to have hash %s but got %s", blkBHash.Hex(), vm1.blockChain.CurrentBlock().Hash().Hex()) // Should still be no-ops on re-verify require.NoError(t, vm1BlkC.Verify(context.Background()), "Block failed verification on VM1") require.NoError(t, vm1BlkD.Verify(context.Background()), "Block failed verification on VM1") - if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkBHash { - require.Fail(t, 
fmt.Sprintf("expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), b.Hash().Hex())) - } - if b := vm1.blockChain.GetBlockByNumber(blkDHeight); b != nil { - require.Fail(t, fmt.Sprintf("expected block at %d to be nil but got %s", blkDHeight, b.Hash().Hex())) - } - if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkBHash { - require.Fail(t, fmt.Sprintf("expected current block to have hash %s but got %s", blkBHash.Hex(), b.Hash().Hex())) - } + require.Equal(t, blkBHash, vm1.blockChain.GetBlockByNumber(blkBHeight).Hash(), "expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), vm1.blockChain.GetBlockByNumber(blkBHeight).Hash().Hex()) + require.Nil(t, vm1.blockChain.GetBlockByNumber(blkDHeight), "expected block at %d to be nil", blkDHeight) + require.Equal(t, blkBHash, vm1.blockChain.CurrentBlock().Hash(), "expected current block to have hash %s but got %s", blkBHash.Hex(), vm1.blockChain.CurrentBlock().Hash().Hex()) // Should be queryable after setting preference to side chain require.NoError(t, vm1.SetPreference(context.Background(), vm1BlkD.ID())) - if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkCHash { - require.Fail(t, fmt.Sprintf("expected block at %d to have hash %s but got %s", blkBHeight, blkCHash.Hex(), b.Hash().Hex())) - } - if b := vm1.blockChain.GetBlockByNumber(blkDHeight); b.Hash() != blkDHash { - require.Fail(t, fmt.Sprintf("expected block at %d to have hash %s but got %s", blkDHeight, blkDHash.Hex(), b.Hash().Hex())) - } - if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkDHash { - require.Fail(t, fmt.Sprintf("expected current block to have hash %s but got %s", blkDHash.Hex(), b.Hash().Hex())) - } + require.Equal(t, blkCHash, vm1.blockChain.GetBlockByNumber(blkBHeight).Hash(), "expected block at %d to have hash %s but got %s", blkBHeight, blkCHash.Hex(), vm1.blockChain.GetBlockByNumber(blkBHeight).Hash().Hex()) 
+ require.Equal(t, blkDHash, vm1.blockChain.GetBlockByNumber(blkDHeight).Hash(), "expected block at %d to have hash %s but got %s", blkDHeight, blkDHash.Hex(), vm1.blockChain.GetBlockByNumber(blkDHeight).Hash().Hex()) + require.Equal(t, blkDHash, vm1.blockChain.CurrentBlock().Hash(), "expected current block to have hash %s but got %s", blkDHash.Hex(), vm1.blockChain.CurrentBlock().Hash().Hex()) // Attempt to accept out of order - if err := vm1BlkD.Accept(context.Background()); !strings.Contains(err.Error(), "expected accepted block to have parent") { - require.NoError(t, err, "unexpected error when accepting out of order block") - } + require.ErrorContains(t, vm1BlkD.Accept(context.Background()), "expected accepted block to have parent", "unexpected error when accepting out of order block") // Accept in order require.NoError(t, vm1BlkC.Accept(context.Background()), "Block failed verification on VM1") require.NoError(t, vm1BlkD.Accept(context.Background()), "Block failed acceptance on VM1") // Ensure queryable after accepting - if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkCHash { - require.Fail(t, fmt.Sprintf("expected block at %d to have hash %s but got %s", blkBHeight, blkCHash.Hex(), b.Hash().Hex())) - } - if b := vm1.blockChain.GetBlockByNumber(blkDHeight); b.Hash() != blkDHash { - require.Fail(t, fmt.Sprintf("expected block at %d to have hash %s but got %s", blkDHeight, blkDHash.Hex(), b.Hash().Hex())) - } - if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkDHash { - require.Fail(t, fmt.Sprintf("expected current block to have hash %s but got %s", blkDHash.Hex(), b.Hash().Hex())) - } + require.Equal(t, blkCHash, vm1.blockChain.GetBlockByNumber(blkBHeight).Hash(), "expected block at %d to have hash %s but got %s", blkBHeight, blkCHash.Hex(), vm1.blockChain.GetBlockByNumber(blkBHeight).Hash().Hex()) + require.Equal(t, blkDHash, vm1.blockChain.GetBlockByNumber(blkDHeight).Hash(), "expected block at %d to have hash %s but got %s", 
blkDHeight, blkDHash.Hex(), vm1.blockChain.GetBlockByNumber(blkDHeight).Hash().Hex()) + require.Equal(t, blkDHash, vm1.blockChain.CurrentBlock().Hash(), "expected current block to have hash %s but got %s", blkDHash.Hex(), vm1.blockChain.CurrentBlock().Hash().Hex()) } // Regression test to ensure that a VM that prefers block B is able to parse @@ -1254,9 +1146,7 @@ func testUncleBlock(t *testing.T, scheme string) { txErrors := vm1.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } msg, err := vm1.WaitForEvent(context.Background()) @@ -1279,13 +1169,9 @@ func testUncleBlock(t *testing.T, scheme string) { require.NoError(t, vm2BlkA.Accept(context.Background()), "VM2 failed to accept block") newHead := <-newTxPoolHeadChan1 - if newHead.Head.Hash() != common.Hash(vm1BlkA.ID()) { - require.Fail(t, "Expected new block to match") - } + require.Equal(t, common.Hash(vm1BlkA.ID()), newHead.Head.Hash(), "Expected new block to match") newHead = <-newTxPoolHeadChan2 - if newHead.Head.Hash() != common.Hash(vm2BlkA.ID()) { - require.Fail(t, "Expected new block to match") - } + require.Equal(t, common.Hash(vm2BlkA.ID()), newHead.Head.Hash(), "Expected new block to match") txs := make([]*types.Transaction, 10) for i := 0; i < 10; i++ { @@ -1299,9 +1185,7 @@ func testUncleBlock(t *testing.T, scheme string) { errs = vm1.txPool.AddRemotesSync(txs) for i, err := range errs { - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to add transaction to VM1 at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add transaction to VM1 at index %d: %s", i, err) } msg, err = vm1.WaitForEvent(context.Background()) @@ -1317,9 +1201,7 @@ func testUncleBlock(t *testing.T, scheme string) { errs = vm2.txPool.AddRemotesSync(txs[0:5]) for i, err := range errs { - if err != nil { - 
require.Fail(t, fmt.Sprintf("Failed to add transaction to VM2 at index %d: %s", i, err))
-		}
+		require.NoError(t, err, "Failed to add transaction to VM2 at index %d: %s", i, err)
 	}
 
 	msg, err = vm2.WaitForEvent(context.Background())
@@ -1334,15 +1216,11 @@ func testUncleBlock(t *testing.T, scheme string) {
 	require.NoError(t, vm2.SetPreference(context.Background(), vm2BlkC.ID()))
 
 	newHead = <-newTxPoolHeadChan2
-	if newHead.Head.Hash() != common.Hash(vm2BlkC.ID()) {
-		require.Fail(t, "Expected new block to match")
-	}
+	require.Equal(t, common.Hash(vm2BlkC.ID()), newHead.Head.Hash(), "Expected new block to match")
 
 	errs = vm2.txPool.AddRemotesSync(txs[5:10])
 	for i, err := range errs {
-		if err != nil {
-			require.Fail(t, fmt.Sprintf("Failed to add transaction to VM2 at index %d: %s", i, err))
-		}
+		require.NoError(t, err, "Failed to add transaction to VM2 at index %d: %s", i, err)
 	}
 
 	msg, err = vm2.WaitForEvent(context.Background())
@@ -1367,14 +1245,11 @@ func testUncleBlock(t *testing.T, scheme string) {
 	)
 	uncleBlock, _ := wrapBlock(uncleEthBlock, tvm2.vm)
 
-	if err := uncleBlock.Verify(context.Background()); !errors.Is(err, errUnclesUnsupported) {
-		require.Fail(t, fmt.Sprintf("VM2 should have failed with %q but got %q", errUnclesUnsupported, err.Error()))
-	}
-	if _, err := vm1.ParseBlock(context.Background(), vm2BlkC.Bytes()); err != nil {
-		require.NoError(t, err, "VM1 errored parsing blkC")
-	}
+	require.ErrorIs(t, uncleBlock.Verify(context.Background()), errUnclesUnsupported)
+	_, err = vm1.ParseBlock(context.Background(), vm2BlkC.Bytes())
+	require.NoError(t, err, "VM1 errored parsing blkC")
 	_, err = vm1.ParseBlock(context.Background(), uncleBlock.Bytes())
 	require.ErrorIs(t, err, errUnclesUnsupported)
 }
 
 // Regression test to ensure that a VM that is not able 
to parse a block that @@ -1403,9 +1278,7 @@ func testEmptyBlock(t *testing.T, scheme string) { txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } msg, err := tvm.vm.WaitForEvent(context.Background()) @@ -1429,12 +1302,9 @@ func testEmptyBlock(t *testing.T, scheme string) { emptyBlock, err := wrapBlock(emptyEthBlock, tvm.vm) require.NoError(t, err) - if _, err := tvm.vm.ParseBlock(context.Background(), emptyBlock.Bytes()); !errors.Is(err, errEmptyBlock) { - require.Fail(t, "VM should have failed with errEmptyBlock but got "+err.Error()) - } - if err := emptyBlock.Verify(context.Background()); !errors.Is(err, errEmptyBlock) { - require.Fail(t, "block should have failed verification with errEmptyBlock but got "+err.Error()) - } + _, err = tvm.vm.ParseBlock(context.Background(), emptyBlock.Bytes()) + require.ErrorIs(t, err, errEmptyBlock, "VM should have failed with errEmptyBlock but got "+err.Error()) + require.ErrorIs(t, emptyBlock.Verify(context.Background()), errEmptyBlock, "block should have failed verification with errEmptyBlock but got "+err.Error()) } // Regression test to ensure that a VM that verifies block B, C, then @@ -1481,9 +1351,7 @@ func testAcceptReorg(t *testing.T, scheme string) { txErrors := vm1.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } msg, err := vm1.WaitForEvent(context.Background()) @@ -1506,13 +1374,9 @@ func testAcceptReorg(t *testing.T, scheme string) { require.NoError(t, vm2BlkA.Accept(context.Background()), "VM2 failed to accept block") newHead := <-newTxPoolHeadChan1 - if newHead.Head.Hash() != 
common.Hash(vm1BlkA.ID()) { - require.Fail(t, "Expected new block to match") - } + require.Equal(t, common.Hash(vm1BlkA.ID()), newHead.Head.Hash(), "Expected new block to match") newHead = <-newTxPoolHeadChan2 - if newHead.Head.Hash() != common.Hash(vm2BlkA.ID()) { - require.Fail(t, "Expected new block to match") - } + require.Equal(t, common.Hash(vm2BlkA.ID()), newHead.Head.Hash(), "Expected new block to match") // Create list of 10 successive transactions to build block A on vm1 // and to be split into two separate blocks on VM2 @@ -1528,9 +1392,7 @@ func testAcceptReorg(t *testing.T, scheme string) { // for block B errs := vm1.txPool.AddRemotesSync(txs) for i, err := range errs { - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to add transaction to VM1 at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add transaction to VM1 at index %d: %s", i, err) } msg, err = vm1.WaitForEvent(context.Background()) @@ -1546,9 +1408,7 @@ func testAcceptReorg(t *testing.T, scheme string) { errs = vm2.txPool.AddRemotesSync(txs[0:5]) for i, err := range errs { - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to add transaction to VM2 at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add transaction to VM2 at index %d: %s", i, err) } msg, err = vm2.WaitForEvent(context.Background()) @@ -1563,15 +1423,11 @@ func testAcceptReorg(t *testing.T, scheme string) { require.NoError(t, vm2.SetPreference(context.Background(), vm2BlkC.ID())) newHead = <-newTxPoolHeadChan2 - if newHead.Head.Hash() != common.Hash(vm2BlkC.ID()) { - require.Fail(t, "Expected new block to match") - } + require.Equal(t, common.Hash(vm2BlkC.ID()), newHead.Head.Hash(), "Expected new block to match") errs = vm2.txPool.AddRemotesSync(txs[5:]) for i, err := range errs { - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to add transaction to VM2 at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add transaction to VM2 at index %d: %s", i, err) } 
msg, err = vm2.WaitForEvent(context.Background()) @@ -1592,23 +1448,17 @@ func testAcceptReorg(t *testing.T, scheme string) { require.NoError(t, vm1BlkD.Verify(context.Background()), "Block failed verification on VM1") blkBHash := vm1BlkB.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() - if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkBHash { - require.Fail(t, fmt.Sprintf("expected current block to have hash %s but got %s", blkBHash.Hex(), b.Hash().Hex())) - } + require.Equal(t, blkBHash, vm1.blockChain.CurrentBlock().Hash(), "expected current block to have hash %s but got %s", blkBHash.Hex(), vm1.blockChain.CurrentBlock().Hash().Hex()) require.NoError(t, vm1BlkC.Accept(context.Background())) blkCHash := vm1BlkC.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() - if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkCHash { - require.Fail(t, fmt.Sprintf("expected current block to have hash %s but got %s", blkCHash.Hex(), b.Hash().Hex())) - } + require.Equal(t, blkCHash, vm1.blockChain.CurrentBlock().Hash(), "expected current block to have hash %s but got %s", blkCHash.Hex(), vm1.blockChain.CurrentBlock().Hash().Hex()) require.NoError(t, vm1BlkB.Reject(context.Background())) require.NoError(t, vm1BlkD.Accept(context.Background())) blkDHash := vm1BlkD.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() - if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkDHash { - require.Fail(t, fmt.Sprintf("expected current block to have hash %s but got %s", blkDHash.Hex(), b.Hash().Hex())) - } + require.Equal(t, blkDHash, vm1.blockChain.CurrentBlock().Hash(), "expected current block to have hash %s but got %s", blkDHash.Hex(), vm1.blockChain.CurrentBlock().Hash().Hex()) } func TestTimeSemanticVerify(t *testing.T) { @@ -1679,9 +1529,7 @@ func TestTimeSemanticVerify(t *testing.T) { txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to 
add tx at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } msg, err := tvm.vm.WaitForEvent(context.Background()) @@ -1760,9 +1608,7 @@ func TestBuildTimeMilliseconds(t *testing.T) { txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } msg, err := tvm.vm.WaitForEvent(context.Background()) @@ -1802,9 +1648,7 @@ func testLastAcceptedBlockNumberAllow(t *testing.T, scheme string) { txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } msg, err := tvm.vm.WaitForEvent(context.Background()) @@ -1826,22 +1670,16 @@ func testLastAcceptedBlockNumberAllow(t *testing.T, scheme string) { ctx := context.Background() b, err := tvm.vm.eth.APIBackend.BlockByNumber(ctx, rpc.BlockNumber(blkHeight)) require.NoError(t, err) - if b.Hash() != blkHash { - require.Fail(t, fmt.Sprintf("expected block at %d to have hash %s but got %s", blkHeight, blkHash.Hex(), b.Hash().Hex())) - } + require.Equal(t, blkHash, b.Hash(), "expected block at %d to have hash %s but got %s", blkHeight, blkHash.Hex(), b.Hash().Hex()) tvm.vm.eth.APIBackend.SetAllowUnfinalizedQueries(false) _, err = tvm.vm.eth.APIBackend.BlockByNumber(ctx, rpc.BlockNumber(blkHeight)) - if !errors.Is(err, eth.ErrUnfinalizedData) { - require.Fail(t, "expected ErrUnfinalizedData but got "+err.Error()) - } - + require.ErrorIs(t, err, eth.ErrUnfinalizedData, "expected ErrUnfinalizedData but got %s", err) require.NoError(t, blk.Accept(context.Background()), "VM failed to accept block") - if b := tvm.vm.blockChain.GetBlockByNumber(blkHeight); b.Hash() != 
blkHash { - require.Fail(t, fmt.Sprintf("expected block at %d to have hash %s but got %s", blkHeight, blkHash.Hex(), b.Hash().Hex())) - } + b = tvm.vm.blockChain.GetBlockByNumber(blkHeight) + require.Equal(t, blkHash, b.Hash(), "expected block at %d to have hash %s but got %s", blkHeight, blkHash.Hex(), b.Hash().Hex()) } func TestBuildAllowListActivationBlock(t *testing.T) { @@ -1876,9 +1714,7 @@ func testBuildAllowListActivationBlock(t *testing.T, scheme string) { genesisState, err := tvm.vm.blockChain.StateAt(tvm.vm.blockChain.Genesis().Root()) require.NoError(t, err) role := deployerallowlist.GetContractDeployerAllowListStatus(genesisState, testEthAddrs[0]) - if role != allowlist.NoRole { - require.Fail(t, fmt.Sprintf("Expected allow list status to be set to no role: %s, but found: %s", allowlist.NoRole, role)) - } + require.Equal(t, allowlist.NoRole, role, "Expected allow list status to be set to no role: %s, but found: %s", allowlist.NoRole, role) // Send basic transaction to construct a simple block and confirm that the precompile state configuration in the worker behaves correctly. tx := types.NewTransaction(uint64(0), testEthAddrs[1], new(big.Int).Mul(firstTxAmount, big.NewInt(4)), 21000, big.NewInt(testMinGasPrice*3), nil) @@ -1887,24 +1723,18 @@ func testBuildAllowListActivationBlock(t *testing.T, scheme string) { txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } blk := issueAndAccept(t, tvm.vm) newHead := <-newTxPoolHeadChan - if newHead.Head.Hash() != common.Hash(blk.ID()) { - require.Fail(t, "Expected new block to match") - } + require.Equal(t, common.Hash(blk.ID()), newHead.Head.Hash(), "Expected new block to match") // Verify that the allow list config activation was handled correctly in the first block. 
blkState, err := tvm.vm.blockChain.StateAt(blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Root()) require.NoError(t, err) role = deployerallowlist.GetContractDeployerAllowListStatus(blkState, testEthAddrs[0]) - if role != allowlist.AdminRole { - require.Fail(t, fmt.Sprintf("Expected allow list status to be set role %s, but found: %s", allowlist.AdminRole, role)) - } + require.Equal(t, allowlist.AdminRole, role, "Expected allow list status to be set role %s, but found: %s", allowlist.AdminRole, role) } // Test that the tx allow list allows whitelisted transactions and blocks non-whitelisted addresses @@ -1959,13 +1789,9 @@ func TestTxAllowListSuccessfulTx(t *testing.T) { // Check that address 0 is whitelisted and address 1 is not role := txallowlist.GetTxAllowListStatus(genesisState, testEthAddrs[0]) - if role != allowlist.AdminRole { - require.Fail(t, fmt.Sprintf("Expected allow list status to be set to admin: %s, but found: %s", allowlist.AdminRole, role)) - } + require.Equal(t, allowlist.AdminRole, role, "Expected allow list status to be set to admin: %s, but found: %s", allowlist.AdminRole, role) role = txallowlist.GetTxAllowListStatus(genesisState, testEthAddrs[1]) - if role != allowlist.NoRole { - require.Fail(t, fmt.Sprintf("Expected allow list status to be set to no role: %s, but found: %s", allowlist.NoRole, role)) - } + require.Equal(t, allowlist.NoRole, role, "Expected allow list status to be set to no role: %s, but found: %s", allowlist.NoRole, role) // Should not be a manager role because Durango has not activated yet role = txallowlist.GetTxAllowListStatus(genesisState, managerAddress) require.Equal(t, allowlist.NoRole, role) @@ -1976,9 +1802,7 @@ func TestTxAllowListSuccessfulTx(t *testing.T) { require.NoError(t, err) errs := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx0}) - if err := errs[0]; err != nil { - require.NoError(t, err, "Failed to add tx at index") - } + require.NoError(t, errs[0], "Failed to add tx at index") // 
Submit a rejected transaction, should throw an error tx1 := types.NewTransaction(uint64(0), testEthAddrs[1], big.NewInt(2), 21000, big.NewInt(testMinGasPrice), nil) @@ -1986,9 +1810,7 @@ func TestTxAllowListSuccessfulTx(t *testing.T) { require.NoError(t, err) errs = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) - if err := errs[0]; !errors.Is(err, vmerrors.ErrSenderAddressNotAllowListed) { - require.NoError(t, err, "expected ErrSenderAddressNotAllowListed, got") - } + require.ErrorIs(t, errs[0], vmerrors.ErrSenderAddressNotAllowListed, "want %s, got %s", vmerrors.ErrSenderAddressNotAllowListed.Error(), errs[0].Error()) // Submit a rejected transaction, should throw an error because manager is not activated tx2 := types.NewTransaction(uint64(0), managerAddress, big.NewInt(2), 21000, big.NewInt(testMinGasPrice), nil) @@ -1996,7 +1818,7 @@ func TestTxAllowListSuccessfulTx(t *testing.T) { require.NoError(t, err) errs = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx2}) - require.ErrorIs(t, errs[0], vmerrors.ErrSenderAddressNotAllowListed) + require.ErrorIs(t, errs[0], vmerrors.ErrSenderAddressNotAllowListed, "want %s, got %s", vmerrors.ErrSenderAddressNotAllowListed.Error(), errs[0].Error()) blk := issueAndAccept(t, tvm.vm) newHead := <-newTxPoolHeadChan @@ -2007,9 +1829,7 @@ func TestTxAllowListSuccessfulTx(t *testing.T) { txs := block.Transactions() - if txs.Len() != 1 { - require.Fail(t, fmt.Sprintf("Expected number of txs to be %d, but found %d", 1, txs.Len())) - } + require.Equal(t, 1, txs.Len(), "Expected number of txs to be %d, but found %d", 1, txs.Len()) require.Equal(t, signedTx0.Hash(), txs[0].Hash()) @@ -2167,13 +1987,9 @@ func TestTxAllowListDisablePrecompile(t *testing.T) { // Check that address 0 is whitelisted and address 1 is not role := txallowlist.GetTxAllowListStatus(genesisState, testEthAddrs[0]) - if role != allowlist.AdminRole { - require.Fail(t, fmt.Sprintf("Expected allow list status to be set to admin: %s, but found: 
%s", allowlist.AdminRole, role)) - } + require.Equal(t, allowlist.AdminRole, role, "expected allow list status to be set to admin: %s, but found: %s", allowlist.AdminRole, role) role = txallowlist.GetTxAllowListStatus(genesisState, testEthAddrs[1]) - if role != allowlist.NoRole { - require.Fail(t, fmt.Sprintf("Expected allow list status to be set to no role: %s, but found: %s", allowlist.NoRole, role)) - } + require.Equal(t, allowlist.NoRole, role, "expected allow list status to be set to no role: %s, but found: %s", allowlist.NoRole, role) // Submit a successful transaction tx0 := types.NewTransaction(uint64(0), testEthAddrs[0], big.NewInt(1), 21000, big.NewInt(testMinGasPrice), nil) @@ -2181,9 +1997,7 @@ func TestTxAllowListDisablePrecompile(t *testing.T) { require.NoError(t, err) errs := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx0}) - if err := errs[0]; err != nil { - require.NoError(t, err, "Failed to add tx at index") - } + require.NoError(t, errs[0], "Failed to add tx at index") // Submit a rejected transaction, should throw an error tx1 := types.NewTransaction(uint64(0), testEthAddrs[1], big.NewInt(2), 21000, big.NewInt(testMinGasPrice), nil) @@ -2191,18 +2005,14 @@ func TestTxAllowListDisablePrecompile(t *testing.T) { require.NoError(t, err) errs = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) - if err := errs[0]; !errors.Is(err, vmerrors.ErrSenderAddressNotAllowListed) { - require.NoError(t, err, "expected ErrSenderAddressNotAllowListed, got") - } + require.ErrorIs(t, errs[0], vmerrors.ErrSenderAddressNotAllowListed, "want %s, got %s", vmerrors.ErrSenderAddressNotAllowListed, errs[0]) blk := issueAndAccept(t, tvm.vm) // Verify that the constructed block only has the whitelisted tx block := blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock txs := block.Transactions() - if txs.Len() != 1 { - require.Fail(t, fmt.Sprintf("Expected number of txs to be %d, but found %d", 1, txs.Len())) - } + require.Equal(t, 1, txs.Len(), 
"Expected number of txs to be %d, but found %d", 1, txs.Len()) require.Equal(t, signedTx0.Hash(), txs[0].Hash()) // verify the issued block is after the network upgrade @@ -2212,9 +2022,7 @@ func TestTxAllowListDisablePrecompile(t *testing.T) { // retry the rejected Tx, which should now succeed errs = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) - if err := errs[0]; err != nil { - require.NoError(t, err, "Failed to add tx at index") - } + require.NoError(t, errs[0], "Failed to add tx at index") tvm.vm.clock.Set(tvm.vm.clock.Time().Add(2 * time.Second)) // add 2 seconds for gas fee to adjust blk = issueAndAccept(t, tvm.vm) @@ -2222,9 +2030,7 @@ func TestTxAllowListDisablePrecompile(t *testing.T) { // Verify that the constructed block only has the previously rejected tx block = blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock txs = block.Transactions() - if txs.Len() != 1 { - require.Fail(t, fmt.Sprintf("Expected number of txs to be %d, but found %d", 1, txs.Len())) - } + require.Equal(t, 1, txs.Len(), "Expected number of txs to be %d, but found %d", 1, txs.Len()) require.Equal(t, signedTx1.Hash(), txs[0].Hash()) } @@ -2271,13 +2077,9 @@ func TestFeeManagerChangeFee(t *testing.T) { // Check that address 0 is whitelisted and address 1 is not role := feemanager.GetFeeManagerStatus(genesisState, testEthAddrs[0]) - if role != allowlist.AdminRole { - require.Fail(t, fmt.Sprintf("Expected fee manager list status to be set to admin: %s, but found: %s", allowlist.AdminRole, role)) - } + require.Equal(t, allowlist.AdminRole, role, "expected fee manager list status to be set to admin: %s, but found: %s", allowlist.AdminRole, role) role = feemanager.GetFeeManagerStatus(genesisState, testEthAddrs[1]) - if role != allowlist.NoRole { - require.Fail(t, fmt.Sprintf("Expected fee manager list status to be set to no role: %s, but found: %s", allowlist.NoRole, role)) - } + require.Equal(t, allowlist.NoRole, role, "expected fee manager list status to be set to 
no role: %s, but found: %s", allowlist.NoRole, role) // Contract is initialized but no preconfig is given, reader should return genesis fee config feeConfig, lastChangedAt, err := tvm.vm.blockChain.GetFeeConfigAt(tvm.vm.blockChain.Genesis().Header()) require.NoError(t, err) @@ -2306,15 +2108,11 @@ func TestFeeManagerChangeFee(t *testing.T) { require.NoError(t, err) errs := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) - if err := errs[0]; err != nil { - require.NoError(t, err, "Failed to add tx at index") - } + require.NoError(t, errs[0], "Failed to add tx at index") blk := issueAndAccept(t, tvm.vm) newHead := <-newTxPoolHeadChan - if newHead.Head.Hash() != common.Hash(blk.ID()) { - require.Fail(t, "Expected new block to match") - } + require.Equal(t, common.Hash(blk.ID()), newHead.Head.Hash(), "Expected new block to match") block := blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock @@ -2376,9 +2174,7 @@ func testAllowFeeRecipientDisabled(t *testing.T, scheme string) { txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } msg, err := tvm.vm.WaitForEvent(context.Background()) @@ -2440,16 +2236,12 @@ func TestAllowFeeRecipientEnabled(t *testing.T) { txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } blk := issueAndAccept(t, tvm.vm) newHead := <-newTxPoolHeadChan - if newHead.Head.Hash() != common.Hash(blk.ID()) { - require.Fail(t, "Expected new block to match") - } + require.Equal(t, common.Hash(blk.ID()), newHead.Head.Hash(), "Expected new block to match") ethBlock := 
blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock require.Equal(t, etherBase, ethBlock.Coinbase()) // Verify that etherBase has received fees @@ -2763,16 +2555,12 @@ func TestSkipChainConfigCheckCompatible(t *testing.T) { require.NoError(t, err) errs := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range errs { - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } blk := issueAndAccept(t, tvm.vm) newHead := <-newTxPoolHeadChan - if newHead.Head.Hash() != common.Hash(blk.ID()) { - require.Fail(t, "Expected new block to match") - } + require.Equal(t, common.Hash(blk.ID()), newHead.Head.Hash(), "Expected new block to match") reinitVM := &VM{} // use the block's timestamp instead of 0 since rewind to genesis @@ -2863,9 +2651,7 @@ func TestParentBeaconRootBlock(t *testing.T) { txErrors := tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx}) for i, err := range txErrors { - if err != nil { - require.Fail(t, fmt.Sprintf("Failed to add tx at index %d: %s", i, err)) - } + require.NoError(t, err, "Failed to add tx at index %d: %s", i, err) } msg, err := tvm.vm.WaitForEvent(context.Background()) diff --git a/precompile/allowlist/allowlisttest/test_allowlist_config.go b/precompile/allowlist/allowlisttest/test_allowlist_config.go index 72b4e2be89..3c45bb1891 100644 --- a/precompile/allowlist/allowlisttest/test_allowlist_config.go +++ b/precompile/allowlist/allowlisttest/test_allowlist_config.go @@ -212,9 +212,8 @@ func VerifyPrecompileWithAllowListTests(t *testing.T, module modules.Module, ver tests := AllowListConfigVerifyTests(t, module) // Add the contract specific tests to the map of tests to run. 
for name, test := range verifyTests {
-		if _, exists := tests[name]; exists {
-			require.Failf(t, "duplicate test name", "name: %s", name)
-		}
+		_, exists := tests[name]
+		require.False(t, exists, "duplicate test name: %s", name)
 		tests[name] = test
 	}
 
@@ -226,9 +225,8 @@ func EqualPrecompileWithAllowListTests(t *testing.T, module modules.Module, equa
 	tests := AllowListConfigEqualTests(t, module)
 	// Add the contract specific tests to the map of tests to run.
 	for name, test := range equalTests {
-		if _, exists := tests[name]; exists {
-			require.Failf(t, "duplicate test name", "name: %s", name)
-		}
+		_, exists := tests[name]
+		require.False(t, exists, "duplicate test name: %s", name)
 		tests[name] = test
 	}
 

From 7da2909200cd2e132d2fb900c710b0fb894b6543 Mon Sep 17 00:00:00 2001
From: Jonathan Oppenheimer
Date: Mon, 17 Nov 2025 16:23:20 -0500
Subject: [PATCH 10/46] some Austin comments

---
 core/state/trie_prefetcher_extra_test.go |  7 ++-----
 plugin/evm/vm_test.go                    |  6 +++---
 plugin/evm/vm_upgrade_bytes_test.go      | 15 +++++++--------
 sync/statesync/code_syncer_test.go       |  8 +++-----
 4 files changed, 15 insertions(+), 21 deletions(-)

diff --git a/core/state/trie_prefetcher_extra_test.go b/core/state/trie_prefetcher_extra_test.go
index 632aec8420..dd677c9c6b 100644
--- a/core/state/trie_prefetcher_extra_test.go
+++ b/core/state/trie_prefetcher_extra_test.go
@@ -123,11 +123,8 @@ func BenchmarkPrefetcherDatabase(b *testing.B) {
 			commit(levelDB, snaps, db)
 			b.Logf("Root: %v, kvs: %d, block: %d (committed)", root, count, block)
 		}
-		if previous != root {
-			require.NoError(db.TrieDB().Dereference(previous))
-		} else {
-			b.Fail()
-		}
+		require.NotEqual(root, previous, "expected new root to differ from previous root")
+		require.NoError(db.TrieDB().Dereference(previous))
 	}
 	require.NoError(levelDB.Close())
 	b.Log("Starting benchmarks")
diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go
index 7bc06cc862..3d178bcc96 100644
--- a/plugin/evm/vm_test.go
+++ b/plugin/evm/vm_test.go
@@ -1829,7 +1829,7 @@ func 
TestTxAllowListSuccessfulTx(t *testing.T) { txs := block.Transactions() - require.Equal(t, 1, txs.Len(), "Expected number of txs to be %d, but found %d", 1, txs.Len()) + require.Len(t, txs, 1, "Expected number of txs to be %d, but found %d", 1, txs.Len()) require.Equal(t, signedTx0.Hash(), txs[0].Hash()) @@ -2012,7 +2012,7 @@ func TestTxAllowListDisablePrecompile(t *testing.T) { // Verify that the constructed block only has the whitelisted tx block := blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock txs := block.Transactions() - require.Equal(t, 1, txs.Len(), "Expected number of txs to be %d, but found %d", 1, txs.Len()) + require.Len(t, txs, 1, "Expected number of txs to be %d, but found %d", 1, txs.Len()) require.Equal(t, signedTx0.Hash(), txs[0].Hash()) // verify the issued block is after the network upgrade @@ -2030,7 +2030,7 @@ func TestTxAllowListDisablePrecompile(t *testing.T) { // Verify that the constructed block only has the previously rejected tx block = blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock txs = block.Transactions() - require.Equal(t, 1, txs.Len(), "Expected number of txs to be %d, but found %d", 1, txs.Len()) + require.Len(t, txs, 1, "Expected number of txs to be %d, but found %d", 1, txs.Len()) require.Equal(t, signedTx1.Hash(), txs[0].Hash()) } diff --git a/plugin/evm/vm_upgrade_bytes_test.go b/plugin/evm/vm_upgrade_bytes_test.go index dc902b7c7d..3cf4e7a15c 100644 --- a/plugin/evm/vm_upgrade_bytes_test.go +++ b/plugin/evm/vm_upgrade_bytes_test.go @@ -22,7 +22,6 @@ import ( "github.com/ava-labs/libevm/core/types" "github.com/ava-labs/libevm/crypto" "github.com/holiman/uint256" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/ava-labs/subnet-evm/core" @@ -118,11 +117,11 @@ func TestVMUpgradeBytesPrecompile(t *testing.T) { // Verify that the constructed block only has the whitelisted tx block := blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock txs := block.Transactions() - 
require.Equal(t, 1, txs.Len(), "Expected number of txs to be %d, but found %d", 1, txs.Len()) - assert.Equal(t, signedTx0.Hash(), txs[0].Hash()) + require.Len(t, txs, 1, "Expected number of txs to be %d, but found %d", 1, txs.Len()) + require.Equal(t, signedTx0.Hash(), txs[0].Hash()) // verify the issued block is after the network upgrade - assert.GreaterOrEqual(t, int64(block.Time()), disableAllowListTimestamp.Unix()) + require.GreaterOrEqual(t, int64(block.Time()), disableAllowListTimestamp.Unix()) <-newTxPoolHeadChan // wait for new head in tx pool @@ -136,8 +135,8 @@ func TestVMUpgradeBytesPrecompile(t *testing.T) { // Verify that the constructed block only has the previously rejected tx block = blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock txs = block.Transactions() - require.Equal(t, 1, txs.Len(), "Expected number of txs to be %d, but found %d", 1, txs.Len()) - assert.Equal(t, signedTx1.Hash(), txs[0].Hash()) + require.Len(t, txs, 1, "Expected number of txs to be %d, but found %d", 1, txs.Len()) + require.Equal(t, signedTx1.Hash(), txs[0].Hash()) } func TestNetworkUpgradesOverridden(t *testing.T) { @@ -195,7 +194,7 @@ func TestNetworkUpgradesOverridden(t *testing.T) { blk := issueAndAccept(t, restartedVM) require.NotNil(t, blk) - require.EqualValues(t, 1, blk.Height()) + require.Equal(t, uint64(1), blk.Height()) // verify upgrade overrides require.True(t, restartedVM.currentRules().IsDurango) @@ -303,7 +302,7 @@ func TestVMStateUpgrade(t *testing.T) { blk := issueAndAccept(t, tvm.vm) require.NotNil(t, blk) - require.EqualValues(t, 1, blk.Height()) + require.Equal(t, uint64(1), blk.Height()) // Verify the state upgrade was applied state, err := tvm.vm.blockChain.State() diff --git a/sync/statesync/code_syncer_test.go b/sync/statesync/code_syncer_test.go index 6d22740eba..eea0cd7b28 100644 --- a/sync/statesync/code_syncer_test.go +++ b/sync/statesync/code_syncer_test.go @@ -70,13 +70,11 @@ func testCodeSyncer(t *testing.T, test codeSyncerTest) { err 
:= <-codeSyncer.Done() if test.err != nil { - if err == nil { - require.Fail(t, "expected non-nil error", "error: %s", test.err) + require.ErrorIs(t, err, test.err) + if err != nil { + return } - assert.ErrorIs(t, err, test.err) - return } - require.NoError(t, err) // Assert that the client synced the code correctly. for i, codeHash := range codeHashes { From 54688c770cb611cfec8f54b80c59bd0b6edb14e0 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 16:29:38 -0500 Subject: [PATCH 11/46] More Austin comments --- plugin/evm/syncervm_test.go | 20 ++++++-------------- plugin/evm/vm_test.go | 2 +- precompile/contracts/warp/predicate_test.go | 10 ++++------ warp/messages/codec.go | 5 ++--- warp/verifier_backend_test.go | 10 ++-------- 5 files changed, 15 insertions(+), 32 deletions(-) diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index a9952b36fc..0973805fc0 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -102,12 +102,8 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { reqCount++ // Fail all requests after number 50 to interrupt the sync if reqCount > 50 { - if err := syncerVM.AppRequestFailed(context.Background(), nodeID, requestID, commonEng.ErrTimeout); err != nil { - panic(err) - } - if err := syncerVM.Client.Shutdown(); err != nil { - panic(err) - } + require.NoError(t, syncerVM.AppRequestFailed(context.Background(), nodeID, requestID, commonEng.ErrTimeout)) + require.NoError(t, syncerVM.Client.Shutdown()) } else { syncerVM.AppResponse(context.Background(), nodeID, requestID, response) } @@ -271,14 +267,12 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s generateAndAcceptBlocks(t, serverVM.vm, numBlocks, func(_ int, gen *core.BlockGen) { br := predicate.BlockResults{} b, err := br.Bytes() - if err != nil { - panic(err) - } + require.NoError(err) gen.AppendExtra(b) tx := types.NewTransaction(gen.TxNonce(testEthAddrs[0]), testEthAddrs[1], 
common.Big1, ethparams.TxGas, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(serverVM.vm.chainConfig.ChainID), testKeys[0].ToECDSA()) - require.NoError(err) + require.NoError(err, "failed to sign transaction") gen.AddTx(signedTx) }, nil) @@ -458,15 +452,13 @@ func testSyncerVM(t *testing.T, vmSetup *syncVMSetup, test syncTest) { generateAndAcceptBlocks(t, syncerVM, blocksToBuild, func(_ int, gen *core.BlockGen) { br := predicate.BlockResults{} b, err := br.Bytes() - if err != nil { - panic(err) - } + require.NoError(err) gen.AppendExtra(b) i := 0 for k := range fundedAccounts { tx := types.NewTransaction(gen.TxNonce(k.Address), toAddress, big.NewInt(1), 21000, big.NewInt(testMinGasPrice), nil) signedTx, err := types.SignTx(tx, types.NewEIP155Signer(serverVM.chainConfig.ChainID), k.PrivateKey) - require.NoError(err) + require.NoError(err, "failed to sign transaction") gen.AddTx(signedTx) i++ if i >= txsPerBlock { diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 3d178bcc96..c5fac75c51 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -2030,7 +2030,7 @@ func TestTxAllowListDisablePrecompile(t *testing.T) { // Verify that the constructed block only has the previously rejected tx block = blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock txs = block.Transactions() - require.Len(t, txs, 1, "Expected number of txs to be %d, but found %d", 1, txs.Len()) + require.Equal(t, 1, txs.Len(), "Expected number of txs to be %d, but found %d", 1, txs.Len()) require.Equal(t, signedTx1.Hash(), txs[0].Hash()) } diff --git a/precompile/contracts/warp/predicate_test.go b/precompile/contracts/warp/predicate_test.go index fa69d7cadc..ed5d7266c4 100644 --- a/precompile/contracts/warp/predicate_test.go +++ b/precompile/contracts/warp/predicate_test.go @@ -150,7 +150,7 @@ func (g GasConfig) PredicateGasCost(chunks int, signers int) uint64 { // createWarpMessage constructs a signed warp message using the global 
variable [unsignedMsg] // and the first [numKeys] signatures from [blsSignatures] -func createWarpMessage(numKeys int) *avalancheWarp.Message { +func createWarpMessage(tb testing.TB, numKeys int) *avalancheWarp.Message { bitSet := set.NewBits() for i := 0; i < numKeys; i++ { bitSet.Add(i) @@ -181,16 +181,14 @@ func createWarpMessage(numKeys int) *avalancheWarp.Message { copy(warpSignature.Signature[:], bls.SignatureToBytes(sig)) warpMsg, err := avalancheWarp.NewMessage(unsignedMsg, warpSignature) - if err != nil { - panic(err) - } + require.NoError(tb, err) return warpMsg } // createPredicate constructs a warp message using createWarpMessage with numKeys signers // and packs it into predicate encoding. -func createPredicate(numKeys int) predicate.Predicate { - warpMsg := createWarpMessage(numKeys) +func createPredicate(tb testing.TB, numKeys int) predicate.Predicate { + warpMsg := createWarpMessage(t, numKeys) return predicate.New(warpMsg.Bytes()) } diff --git a/warp/messages/codec.go b/warp/messages/codec.go index 968601af39..d45c32317d 100644 --- a/warp/messages/codec.go +++ b/warp/messages/codec.go @@ -9,6 +9,7 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/utils/units" + "github.com/stretchr/testify/require" ) const ( @@ -27,7 +28,5 @@ func init() { lc.RegisterType(&ValidatorUptime{}), Codec.RegisterCodec(CodecVersion, lc), ) - if err != nil { - panic(err) - } + require.NoError(t, err) } diff --git a/warp/verifier_backend_test.go b/warp/verifier_backend_test.go index 6a33bb9eab..59c9e31283 100644 --- a/warp/verifier_backend_test.go +++ b/warp/verifier_backend_test.go @@ -159,15 +159,9 @@ func TestBlockSignatures(t *testing.T) { toMessageBytes := func(id ids.ID) []byte { idPayload, err := payload.NewHash(id) - if err != nil { - panic(err) - } - + require.NoError(t, err) msg, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, idPayload.Bytes()) - if 
err != nil { - panic(err) - } - + require.NoError(t, err) return msg.Bytes() } From 963d58ff1458ba4bd312dfb48f702a635e3e499b Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 16:33:21 -0500 Subject: [PATCH 12/46] Update core/blockchain_ext_test.go Co-authored-by: Austin Larson <78000745+alarso16@users.noreply.github.com> Signed-off-by: Jonathan Oppenheimer <147infiniti@gmail.com> --- core/blockchain_ext_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/blockchain_ext_test.go b/core/blockchain_ext_test.go index 73a2fbfde6..22854f6027 100644 --- a/core/blockchain_ext_test.go +++ b/core/blockchain_ext_test.go @@ -772,7 +772,7 @@ func BuildOnVariousStages(t *testing.T, create createFunc) { checkBlockChainState(t, blockchain, gspec, chainDB, create, checkState) } -func EmptyBlocksTest(t *testing.T, create createFunc) { +func EmptyBlocks(t *testing.T, create createFunc) { require := require.New(t) chainDB := rawdb.NewMemoryDatabase() // Ensure that key1 has some funds in the genesis block. 
From 016a09c132a5d31f4aedb75b5100892ce3e4d63a Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 16:36:49 -0500 Subject: [PATCH 13/46] lint --- plugin/evm/vm_test.go | 22 +++++++++++++++------ precompile/contracts/warp/predicate_test.go | 10 ++++++---- warp/messages/codec.go | 5 +++-- 3 files changed, 25 insertions(+), 12 deletions(-) diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index c5fac75c51..89c64fb2ed 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -1245,11 +1245,14 @@ func testUncleBlock(t *testing.T, scheme string) { ) uncleBlock, _ := wrapBlock(uncleEthBlock, tvm2.vm) - require.ErrorIs(t, uncleBlock.Verify(context.Background()), errUnclesUnsupported, "VM2 should have failed with %q but got %q", errUnclesUnsupported, err.Error()) + verifyErr := uncleBlock.Verify(context.Background()) + errStr := err.Error() + require.ErrorIs(t, verifyErr, errUnclesUnsupported, "VM2 should have failed with %q but got %q", errUnclesUnsupported, errStr) _, err = vm1.ParseBlock(context.Background(), vm2BlkC.Bytes()) require.NoError(t, err, "VM1 errored parsing blkC") _, err = vm1.ParseBlock(context.Background(), uncleBlock.Bytes()) - require.ErrorIs(t, err, errUnclesUnsupported, "VM1 should have failed with %q but got %q", errUnclesUnsupported, err.Error()) + errStr = err.Error() + require.ErrorIs(t, err, errUnclesUnsupported, "VM1 should have failed with %q but got %q", errUnclesUnsupported, errStr) } // Regression test to ensure that a VM that is not able to parse a block that @@ -1303,8 +1306,11 @@ func testEmptyBlock(t *testing.T, scheme string) { require.NoError(t, err) _, err = tvm.vm.ParseBlock(context.Background(), emptyBlock.Bytes()) - require.ErrorIs(t, err, errEmptyBlock, "VM should have failed with errEmptyBlock but got "+err.Error()) - require.ErrorIs(t, emptyBlock.Verify(context.Background()), errEmptyBlock, "block should have failed verification with errEmptyBlock but got "+err.Error()) + errStr := 
err.Error() + require.ErrorIs(t, err, errEmptyBlock, "VM should have failed with errEmptyBlock but got "+errStr) + verifyErr := emptyBlock.Verify(context.Background()) + errStr = err.Error() + require.ErrorIs(t, verifyErr, errEmptyBlock, "block should have failed verification with errEmptyBlock but got "+errStr) } // Regression test to ensure that a VM that verifies block B, C, then @@ -1810,7 +1816,9 @@ func TestTxAllowListSuccessfulTx(t *testing.T) { require.NoError(t, err) errs = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) - require.ErrorIs(t, errs[0], vmerrors.ErrSenderAddressNotAllowListed, "want %s, got %s", vmerrors.ErrSenderAddressNotAllowListed.Error(), errs[0].Error()) + expectedErrStr := vmerrors.ErrSenderAddressNotAllowListed.Error() + gotErrStr := errs[0].Error() + require.ErrorIs(t, errs[0], vmerrors.ErrSenderAddressNotAllowListed, "want %s, got %s", expectedErrStr, gotErrStr) // Submit a rejected transaction, should throw an error because manager is not activated tx2 := types.NewTransaction(uint64(0), managerAddress, big.NewInt(2), 21000, big.NewInt(testMinGasPrice), nil) @@ -1818,7 +1826,9 @@ func TestTxAllowListSuccessfulTx(t *testing.T) { require.NoError(t, err) errs = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx2}) - require.ErrorIs(t, errs[0], vmerrors.ErrSenderAddressNotAllowListed, "want %s, got %s", vmerrors.ErrSenderAddressNotAllowListed.Error(), errs[0].Error()) + expectedErrStr = vmerrors.ErrSenderAddressNotAllowListed.Error() + gotErrStr = errs[0].Error() + require.ErrorIs(t, errs[0], vmerrors.ErrSenderAddressNotAllowListed, "want %s, got %s", expectedErrStr, gotErrStr) blk := issueAndAccept(t, tvm.vm) newHead := <-newTxPoolHeadChan diff --git a/precompile/contracts/warp/predicate_test.go b/precompile/contracts/warp/predicate_test.go index ed5d7266c4..fa69d7cadc 100644 --- a/precompile/contracts/warp/predicate_test.go +++ b/precompile/contracts/warp/predicate_test.go @@ -150,7 +150,7 @@ func (g GasConfig) 
PredicateGasCost(chunks int, signers int) uint64 { // createWarpMessage constructs a signed warp message using the global variable [unsignedMsg] // and the first [numKeys] signatures from [blsSignatures] -func createWarpMessage(tb testing.TB, numKeys int) *avalancheWarp.Message { +func createWarpMessage(numKeys int) *avalancheWarp.Message { bitSet := set.NewBits() for i := 0; i < numKeys; i++ { bitSet.Add(i) @@ -181,14 +181,16 @@ func createWarpMessage(tb testing.TB, numKeys int) *avalancheWarp.Message { copy(warpSignature.Signature[:], bls.SignatureToBytes(sig)) warpMsg, err := avalancheWarp.NewMessage(unsignedMsg, warpSignature) - require.NoError(tb, err) + if err != nil { + panic(err) + } return warpMsg } // createPredicate constructs a warp message using createWarpMessage with numKeys signers // and packs it into predicate encoding. -func createPredicate(tb testing.TB, numKeys int) predicate.Predicate { - warpMsg := createWarpMessage(t, numKeys) +func createPredicate(numKeys int) predicate.Predicate { + warpMsg := createWarpMessage(numKeys) return predicate.New(warpMsg.Bytes()) } diff --git a/warp/messages/codec.go b/warp/messages/codec.go index d45c32317d..968601af39 100644 --- a/warp/messages/codec.go +++ b/warp/messages/codec.go @@ -9,7 +9,6 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/utils/units" - "github.com/stretchr/testify/require" ) const ( @@ -28,5 +27,7 @@ func init() { lc.RegisterType(&ValidatorUptime{}), Codec.RegisterCodec(CodecVersion, lc), ) - require.NoError(t, err) + if err != nil { + panic(err) + } } From 324071c9dbebc0bdfb4b3ce1a9185b8f1a9e56ba Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 16:38:40 -0500 Subject: [PATCH 14/46] more cleanup --- core/blockchain_ext_test.go | 2 +- core/blockchain_test.go | 29 +++++++++-------------------- 2 files changed, 10 insertions(+), 21 deletions(-) diff --git 
a/core/blockchain_ext_test.go b/core/blockchain_ext_test.go index 22854f6027..ac79ad0615 100644 --- a/core/blockchain_ext_test.go +++ b/core/blockchain_ext_test.go @@ -67,7 +67,7 @@ var tests = []ChainTest{ }, { "EmptyBlocks", - EmptyBlocksTest, + EmptyBlocks, }, { "EmptyAndNonEmptyBlocks", diff --git a/core/blockchain_test.go b/core/blockchain_test.go index ecb47cd0d5..93739a77f0 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -354,9 +354,7 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { } blockchain, err := createBlockChain(chainDB, pruningConfig, gspec, common.Hash{}) - if err != nil { - require.NoError(t, err) - } + require.NoError(t, err) defer blockchain.Stop() // This call generates a chain of 3 blocks. @@ -365,15 +363,12 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), ethparams.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) - if err != nil { - require.NoError(t, err) - } + require.NoError(t, err) - if _, err := blockchain.InsertChain(chain); err != nil { - require.NoError(t, err) - } + _, err = blockchain.InsertChain(chain) + require.NoError(t, err) for _, block := range chain { - require.NoError(t, blockchain.Accept(block)) + require.NoError(t, blockchain.Accept(block), "failed to accept block %d", block.NumberU64()) } blockchain.DrainAcceptorQueue() @@ -381,9 +376,7 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { blockchain.Stop() blockchain, err = createBlockChain(chainDB, pruningConfig, gspec, lastAcceptedHash) - if err != nil { - require.NoError(t, err) - } + require.NoError(t, err, "failed to create blockchain") // Confirm that the node does not have the state for intermediate nodes (exclude the last accepted block) for _, block := range chain[:len(chain)-1] { @@ -409,7 +402,7 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { gspec, 
lastAcceptedHash, ) - require.NoError(t, err, "testRepopulateMissingTriesParallel: failed to create blockchain") + require.NoError(t, err, "failed to create blockchain") defer blockchain.Stop() for _, block := range chain { @@ -528,13 +521,9 @@ func testCanonicalHashMarker(t *testing.T, scheme string) { engine = dummy.NewCoinbaseFaker() ) _, forkA, _, err := GenerateChainWithGenesis(gspec, engine, c.forkA, 10, func(int, *BlockGen) {}) - if err != nil { - require.NoError(t, err) - } + require.NoError(t, err, "failed to generate chain A") _, forkB, _, err := GenerateChainWithGenesis(gspec, engine, c.forkB, 10, func(int, *BlockGen) {}) - if err != nil { - require.NoError(t, err) - } + require.NoError(t, err, "failed to generate chain B") // Initialize test chain db := rawdb.NewMemoryDatabase() From dccd0402d43bc111055e1f4cc337d38f467aef11 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 16:43:09 -0500 Subject: [PATCH 15/46] more austin --- eth/tracers/api_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go index 8edb55bb41..d10be3546a 100644 --- a/eth/tracers/api_test.go +++ b/eth/tracers/api_test.go @@ -55,6 +55,7 @@ import ( "github.com/ava-labs/subnet-evm/params" "github.com/ava-labs/subnet-evm/plugin/evm/customrawdb" "github.com/ava-labs/subnet-evm/rpc" + "github.com/stretchr/testify/require" "golang.org/x/exp/slices" ) @@ -438,8 +439,8 @@ func testTraceCall(t *testing.T, scheme string) { t.Errorf("test %d: error mismatch, want '%v', got '%v'", i, testspec.expectErr, err) } } else { + require.ErrorIs(t, err, testspec.expectErr, "test %d", i) if err != nil { - t.Errorf("test %d: expect no error, got %v", i, err) continue } var have *logger.ExecutionResult From 9723436744ba28044270ac9dc4da066897118fe7 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 17:00:30 -0500 Subject: [PATCH 16/46] Update plugin/evm/customtypes/block_ext_test.go 
Co-authored-by: Austin Larson <78000745+alarso16@users.noreply.github.com> Signed-off-by: Jonathan Oppenheimer <147infiniti@gmail.com> --- plugin/evm/customtypes/block_ext_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/evm/customtypes/block_ext_test.go b/plugin/evm/customtypes/block_ext_test.go index 7b80f23fa2..6128c12660 100644 --- a/plugin/evm/customtypes/block_ext_test.go +++ b/plugin/evm/customtypes/block_ext_test.go @@ -141,7 +141,7 @@ func exportedFieldsPointToDifferentMemory[T interface { case []uint8: assertDifferentPointers(t, unsafe.SliceData(f), unsafe.SliceData(fieldCp.([]uint8))) default: - require.Fail(t, fmt.Sprintf("field %q type %T needs to be added to switch cases of exportedFieldsDeepCopied", field.Name, f)) + require.Failf(t, "invalid type", "field %q type %T needs to be added to switch cases of exportedFieldsDeepCopied", field.Name, f) } }) } From cea161cb1797d8299d994c7936d05a2b7087c7aa Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 17:00:54 -0500 Subject: [PATCH 17/46] Update plugin/evm/customtypes/header_ext_test.go Co-authored-by: Austin Larson <78000745+alarso16@users.noreply.github.com> Signed-off-by: Jonathan Oppenheimer <147infiniti@gmail.com> --- plugin/evm/customtypes/header_ext_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/evm/customtypes/header_ext_test.go b/plugin/evm/customtypes/header_ext_test.go index bcaea96f8b..95ccfcd334 100644 --- a/plugin/evm/customtypes/header_ext_test.go +++ b/plugin/evm/customtypes/header_ext_test.go @@ -175,7 +175,7 @@ func allFieldsSet[T interface { case []uint8, []*Header, Transactions, []*Transaction, Withdrawals, []*Withdrawal: assert.NotEmpty(t, f) default: - assert.Failf(t, "Field %q has unsupported type %T", field.Name, f) + assert.Failf(t, "invalid type", "Field %q has unsupported type %T", field.Name, f) } }) } From aa1b9606d58e9de348d1b639ae0de013d3eff318 Mon Sep 17 00:00:00 2001 From: Jonathan 
Oppenheimer Date: Mon, 17 Nov 2025 17:02:02 -0500 Subject: [PATCH 18/46] More Austin suggestions --- eth/tracers/api_extra_test.go | 6 ++---- plugin/evm/customtypes/block_ext_test.go | 12 +++--------- plugin/evm/syncervm_test.go | 4 +--- 3 files changed, 6 insertions(+), 16 deletions(-) diff --git a/eth/tracers/api_extra_test.go b/eth/tracers/api_extra_test.go index be56497e81..1d9d64dacb 100644 --- a/eth/tracers/api_extra_test.go +++ b/eth/tracers/api_extra_test.go @@ -434,11 +434,9 @@ func testTraceCallWithOverridesStateUpgrade(t *testing.T, scheme string) { } else { require.NoError(t, err, "test %d: expect no error", i) var have *logger.ExecutionResult - err = json.Unmarshal(result.(json.RawMessage), &have) - require.NoError(t, err, "test %d: failed to unmarshal result", i) + require.NoError(t, json.Unmarshal(result.(json.RawMessage), &have), "test %d: failed to unmarshal result", i) var want *logger.ExecutionResult - err = json.Unmarshal([]byte(testspec.expect), &want) - require.NoError(t, err, "test %d: failed to unmarshal result", i) + require.NoError(t, json.Unmarshal([]byte(testspec.expect), &want), "test %d: failed to unmarshal result", i) require.Equal(t, want, have, "test %d: result mismatch", i) } } diff --git a/plugin/evm/customtypes/block_ext_test.go b/plugin/evm/customtypes/block_ext_test.go index 6128c12660..a0082a76eb 100644 --- a/plugin/evm/customtypes/block_ext_test.go +++ b/plugin/evm/customtypes/block_ext_test.go @@ -4,7 +4,6 @@ package customtypes import ( - "fmt" "math/big" "reflect" "testing" @@ -151,14 +150,9 @@ func exportedFieldsPointToDifferentMemory[T interface { // pointers pointing to different memory locations. 
func assertDifferentPointers[T any](t *testing.T, a *T, b any) { t.Helper() - switch { - case a == nil: - require.Fail(t, fmt.Sprintf("a (%T) cannot be nil", a)) - case b == nil: - require.Fail(t, fmt.Sprintf("b (%T) cannot be nil", b)) - case a == b: - require.Fail(t, "pointers to same memory") - } + require.NotNil(t, a, "a (%T) cannot be nil", a) + require.NotNil(t, b, "b (%T) cannot be nil", b) + require.NotSame(t, a, b, "pointers to same memory") // Note: no need to check `b` is of the same type as `a`, otherwise // the memory address would be different as well. } diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index 0973805fc0..5a460a28aa 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -124,9 +124,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil } appSender.SendAppRequestF = func(ctx context.Context, nodeSet set.Set[ids.NodeID], requestID uint32, request []byte) error { nodeID, hasItem := nodeSet.Pop() - if !hasItem { - require.Fail(t, "expected nodeSet to contain at least 1 nodeID") - } + require.True(t, hasItem, "expected nodeSet to contain at least 1 nodeID") go vmSetup.serverVM.AppRequest(ctx, nodeID, requestID, time.Now().Add(1*time.Second), request) return nil } From 9602fd597bc84550396c905b0879c4767c2fba09 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 17:05:25 -0500 Subject: [PATCH 19/46] fix sticky test --- plugin/evm/vm_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 89c64fb2ed..c8ab0d4df1 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -1072,14 +1072,14 @@ func testStickyPreference(t *testing.T, scheme string) { require.NoError(t, vm1BlkC.Verify(context.Background()), "Block failed verification on VM1") require.NoError(t, vm1BlkD.Verify(context.Background()), 
"Block failed verification on VM1") require.Equal(t, blkBHash, vm1.blockChain.GetBlockByNumber(blkBHeight).Hash(), "expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), vm1.blockChain.GetBlockByNumber(blkBHeight).Hash().Hex()) - require.Nil(t, vm1.blockChain.GetBlockByNumber(blkDHeight), "expected block at %d to be nil but got %s", blkDHeight, vm1.blockChain.GetBlockByNumber(blkDHeight).Hash().Hex()) + require.Nil(t, vm1.blockChain.GetBlockByNumber(blkDHeight), "expected block at %d to be nil", blkDHeight) require.Equal(t, blkBHash, vm1.blockChain.CurrentBlock().Hash(), "expected current block to have hash %s but got %s", blkBHash.Hex(), vm1.blockChain.CurrentBlock().Hash().Hex()) // Should still be no-ops on re-verify require.NoError(t, vm1BlkC.Verify(context.Background()), "Block failed verification on VM1") require.NoError(t, vm1BlkD.Verify(context.Background()), "Block failed verification on VM1") require.Equal(t, blkBHash, vm1.blockChain.GetBlockByNumber(blkBHeight).Hash(), "expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), vm1.blockChain.GetBlockByNumber(blkBHeight).Hash().Hex()) - require.Nil(t, vm1.blockChain.GetBlockByNumber(blkDHeight), "expected block at %d to be nil but got %s", blkDHeight, vm1.blockChain.GetBlockByNumber(blkDHeight).Hash().Hex()) + require.Nil(t, vm1.blockChain.GetBlockByNumber(blkDHeight), "expected block at %d to be nil", blkDHeight) require.Equal(t, blkBHash, vm1.blockChain.CurrentBlock().Hash(), "expected current block to have hash %s but got %s", blkBHash.Hex(), vm1.blockChain.CurrentBlock().Hash().Hex()) // Should be queryable after setting preference to side chain From a863ddaee1df1fcbe6e98b7fc74533cf786a0ed1 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 17:09:11 -0500 Subject: [PATCH 20/46] Update core/blockchain_ext_test.go Co-authored-by: Austin Larson <78000745+alarso16@users.noreply.github.com> Signed-off-by: Jonathan Oppenheimer 
<147infiniti@gmail.com> --- core/blockchain_ext_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/core/blockchain_ext_test.go b/core/blockchain_ext_test.go index ac79ad0615..e4ab5942f2 100644 --- a/core/blockchain_ext_test.go +++ b/core/blockchain_ext_test.go @@ -775,7 +775,6 @@ func BuildOnVariousStages(t *testing.T, create createFunc) { func EmptyBlocks(t *testing.T, create createFunc) { require := require.New(t) chainDB := rawdb.NewMemoryDatabase() - // Ensure that key1 has some funds in the genesis block. gspec := &Genesis{ Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: types.GenesisAlloc{}, From 6fd56734afc422b55c24ff391866073840d4cbf2 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 17:13:16 -0500 Subject: [PATCH 21/46] further simplify api extra test --- eth/tracers/api_extra_test.go | 11 +++-------- eth/tracers/api_test.go | 3 +-- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/eth/tracers/api_extra_test.go b/eth/tracers/api_extra_test.go index 1d9d64dacb..b61e863bf1 100644 --- a/eth/tracers/api_extra_test.go +++ b/eth/tracers/api_extra_test.go @@ -134,17 +134,12 @@ func testTraceBlockPrecompileActivation(t *testing.T, scheme string) { for i, tc := range testSuite { result, err := api.TraceBlockByNumber(context.Background(), tc.blockNumber, tc.config) if tc.expectErr != nil { - if err == nil { - require.Fail(t, fmt.Sprintf("test %d, want error %v", i, tc.expectErr)) - continue - } - require.Equal(t, tc.expectErr, err, "test %d: error mismatch", i) + require.ErrorIs(t, err, tc.expectErr, "test %d", i) continue } - require.NoError(t, err, "test %d, want no error", i) + require.NoError(t, err, "test %d", i) have, _ := json.Marshal(result) - want := tc.want - require.Equal(t, want, string(have), "test %d, result mismatch", i) + require.Equal(t, tc.want, string(have), "test %d", i) } } diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go index d10be3546a..8edb55bb41 100644 --- 
a/eth/tracers/api_test.go +++ b/eth/tracers/api_test.go @@ -55,7 +55,6 @@ import ( "github.com/ava-labs/subnet-evm/params" "github.com/ava-labs/subnet-evm/plugin/evm/customrawdb" "github.com/ava-labs/subnet-evm/rpc" - "github.com/stretchr/testify/require" "golang.org/x/exp/slices" ) @@ -439,8 +438,8 @@ func testTraceCall(t *testing.T, scheme string) { t.Errorf("test %d: error mismatch, want '%v', got '%v'", i, testspec.expectErr, err) } } else { - require.ErrorIs(t, err, testspec.expectErr, "test %d", i) if err != nil { + t.Errorf("test %d: expect no error, got %v", i, err) continue } var have *logger.ExecutionResult From 67e5a3582ac66a192f865fc1df8713fcf545e9d9 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 17:18:27 -0500 Subject: [PATCH 22/46] More Austin comment --- core/state_processor_ext_test.go | 7 ++++--- eth/tracers/api_extra_test.go | 16 +++++++--------- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/core/state_processor_ext_test.go b/core/state_processor_ext_test.go index 4b9db6e62b..f101287eba 100644 --- a/core/state_processor_ext_test.go +++ b/core/state_processor_ext_test.go @@ -17,6 +17,7 @@ import ( "github.com/ava-labs/subnet-evm/consensus/dummy" "github.com/ava-labs/subnet-evm/params" "github.com/ava-labs/subnet-evm/params/extras" + "github.com/ava-labs/subnet-evm/plugin/evm/vmerrors" "github.com/ava-labs/subnet-evm/precompile/contracts/txallowlist" "github.com/ava-labs/subnet-evm/utils" @@ -88,17 +89,17 @@ func TestBadTxAllowListBlock(t *testing.T) { defer blockchain.Stop() for i, tt := range []struct { txs []*types.Transaction - want string + want error }{ { // Nonwhitelisted address txs: []*types.Transaction{ mkDynamicTx(0, common.Address{}, ethparams.TxGas, big.NewInt(0), big.NewInt(225000000000)), }, - want: "could not apply tx 0 [0xc5725e8baac950b2925dd4fea446ccddead1cc0affdae18b31a7d910629d9225]: cannot issue transaction from non-allow listed address: 0x71562b71999873DB5b286dF957af199Ec94617F7", 
+ want: vmerrors.ErrSenderAddressNotAllowListed, }, } { block := GenerateBadBlock(gspec.ToBlock(), dummy.NewCoinbaseFaker(), tt.txs, gspec.Config) _, err := blockchain.InsertChain(types.Blocks{block}) - require.EqualError(t, err, tt.want, "test %d: unexpected error message", i) + require.ErrorIs(t, err, tt.want, "test %d", i) } } diff --git a/eth/tracers/api_extra_test.go b/eth/tracers/api_extra_test.go index b61e863bf1..24923a449e 100644 --- a/eth/tracers/api_extra_test.go +++ b/eth/tracers/api_extra_test.go @@ -423,16 +423,14 @@ func testTraceCallWithOverridesStateUpgrade(t *testing.T, scheme string) { } for i, testspec := range testSuite { result, err := api.TraceCall(context.Background(), testspec.call, rpc.BlockNumberOrHash{BlockNumber: &testspec.blockNumber}, testspec.config) - if testspec.expectErr != nil { - require.ErrorIs(t, err, testspec.expectErr, "test %d", i) + require.ErrorIs(t, err, testspec.expectErr, "test %d", i) + if err != nil { continue - } else { - require.NoError(t, err, "test %d: expect no error", i) - var have *logger.ExecutionResult - require.NoError(t, json.Unmarshal(result.(json.RawMessage), &have), "test %d: failed to unmarshal result", i) - var want *logger.ExecutionResult - require.NoError(t, json.Unmarshal([]byte(testspec.expect), &want), "test %d: failed to unmarshal result", i) - require.Equal(t, want, have, "test %d: result mismatch", i) } + var have *logger.ExecutionResult + require.NoError(t, json.Unmarshal(result.(json.RawMessage), &have), "test %d: failed to unmarshal result", i) + var want *logger.ExecutionResult + require.NoError(t, json.Unmarshal([]byte(testspec.expect), &want), "test %d: failed to unmarshal result", i) + require.Equal(t, want, have, "test %d: result mismatch", i) } } From efba90fbc08c09ceb2dc666896e3caec919d263a Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 17:22:19 -0500 Subject: [PATCH 23/46] simplify test --- eth/tracers/api_extra_test.go | 3 +-- 1 file changed, 1 
insertion(+), 2 deletions(-) diff --git a/eth/tracers/api_extra_test.go b/eth/tracers/api_extra_test.go index 24923a449e..493f250230 100644 --- a/eth/tracers/api_extra_test.go +++ b/eth/tracers/api_extra_test.go @@ -133,11 +133,10 @@ func testTraceBlockPrecompileActivation(t *testing.T, scheme string) { } for i, tc := range testSuite { result, err := api.TraceBlockByNumber(context.Background(), tc.blockNumber, tc.config) + require.ErrorIs(t, err, tc.expectErr, "test %d", i) if tc.expectErr != nil { - require.ErrorIs(t, err, tc.expectErr, "test %d", i) continue } - require.NoError(t, err, "test %d", i) have, _ := json.Marshal(result) require.Equal(t, tc.want, string(have), "test %d", i) } From 2efea134139b709ed9595774a131971a5ecacb03 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 17:24:25 -0500 Subject: [PATCH 24/46] fix uncles test --- plugin/evm/vm_test.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index c8ab0d4df1..34fe881f9d 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -1246,13 +1246,11 @@ func testUncleBlock(t *testing.T, scheme string) { uncleBlock, _ := wrapBlock(uncleEthBlock, tvm2.vm) verifyErr := uncleBlock.Verify(context.Background()) - errStr := err.Error() - require.ErrorIs(t, verifyErr, errUnclesUnsupported, "VM2 should have failed with %q but got %q", errUnclesUnsupported, errStr) + require.ErrorIs(t, verifyErr, errUnclesUnsupported) _, err = vm1.ParseBlock(context.Background(), vm2BlkC.Bytes()) require.NoError(t, err, "VM1 errored parsing blkC") _, err = vm1.ParseBlock(context.Background(), uncleBlock.Bytes()) - errStr = err.Error() - require.ErrorIs(t, err, errUnclesUnsupported, "VM1 should have failed with %q but got %q", errUnclesUnsupported, errStr) + require.ErrorIs(t, err, errUnclesUnsupported) } // Regression test to ensure that a VM that is not able to parse a block that From 15eddc35ea10f30d43013b653a8c181e18b47c8e Mon 
Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 17:35:11 -0500 Subject: [PATCH 25/46] Update plugin/evm/syncervm_test.go Co-authored-by: Austin Larson <78000745+alarso16@users.noreply.github.com> Signed-off-by: Jonathan Oppenheimer <147infiniti@gmail.com> --- plugin/evm/syncervm_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index 5a460a28aa..9975cefd08 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -147,7 +147,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { }() height := syncDisabledVM.LastAcceptedBlockInternal().Height() - require.NotZero(t, height, "Unexpected last accepted height: %d", height) + require.Zero(t, height, "Unexpected last accepted height: %d", height) enabled, err := syncDisabledVM.StateSyncEnabled(context.Background()) require.NoError(t, err) From 544e0286219eee304973711148cd625fed0f664a Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 17:38:25 -0500 Subject: [PATCH 26/46] clean up code syncer test --- scripts/upstream_files.txt | 2 -- sync/statesync/code_syncer_test.go | 22 ++++++++-------------- 2 files changed, 8 insertions(+), 16 deletions(-) diff --git a/scripts/upstream_files.txt b/scripts/upstream_files.txt index 910aa26d34..55f87092ab 100644 --- a/scripts/upstream_files.txt +++ b/scripts/upstream_files.txt @@ -53,5 +53,3 @@ triedb/* !internal/ethapi/api_extra.go !internal/ethapi/api_extra_test.go !triedb/firewood/* - - diff --git a/sync/statesync/code_syncer_test.go b/sync/statesync/code_syncer_test.go index eea0cd7b28..13bbccd5ce 100644 --- a/sync/statesync/code_syncer_test.go +++ b/sync/statesync/code_syncer_test.go @@ -13,7 +13,6 @@ import ( "github.com/ava-labs/libevm/core/rawdb" "github.com/ava-labs/libevm/crypto" "github.com/ava-labs/libevm/ethdb/memorydb" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" 
"github.com/ava-labs/subnet-evm/plugin/evm/customrawdb" @@ -62,24 +61,19 @@ func testCodeSyncer(t *testing.T, test codeSyncerTest) { codeSyncer.start(context.Background()) for _, codeHashes := range test.codeRequestHashes { - if err := codeSyncer.addCode(codeHashes); err != nil { - require.ErrorIs(t, err, test.err) - } + require.NoError(t, codeSyncer.addCode(codeHashes)) } codeSyncer.notifyAccountTrieCompleted() err := <-codeSyncer.Done() - if test.err != nil { - require.ErrorIs(t, err, test.err) - if err != nil { - return - } - } + require.ErrorIs(t, err, test.err) - // Assert that the client synced the code correctly. - for i, codeHash := range codeHashes { - codeBytes := rawdb.ReadCode(clientDB, codeHash) - assert.Equal(t, test.codeByteSlices[i], codeBytes) + // Assert that the client synced the code correctly only if no error was expected. + if test.err == nil { + for i, codeHash := range codeHashes { + codeBytes := rawdb.ReadCode(clientDB, codeHash) + require.Equal(t, test.codeByteSlices[i], codeBytes) + } } } From f209131d869e6f328bf68e9761a80d740eeb8cf6 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 17:38:47 -0500 Subject: [PATCH 27/46] Update plugin/evm/vm_test.go Co-authored-by: Austin Larson <78000745+alarso16@users.noreply.github.com> Signed-off-by: Jonathan Oppenheimer <147infiniti@gmail.com> --- plugin/evm/vm_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 34fe881f9d..4e126f391d 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -1025,7 +1025,8 @@ func testStickyPreference(t *testing.T, scheme string) { blkBHeight := vm1BlkB.Height() blkBHash := vm1BlkB.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() - require.Equal(t, blkBHash, vm1.blockChain.GetBlockByNumber(blkBHeight).Hash(), "expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), vm1.blockChain.GetBlockByNumber(blkBHeight).Hash().Hex()) + 
foundBlkBHash := vm1.blockChain.GetBlockByNumber(blkBHeight).Hash() + require.Equal(t, blkBHash, foundBlkBHash, "expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), vm1.blockChain.GetBlockByNumber(blkBHeight).Hash().Hex()) errs = vm2.txPool.AddRemotesSync(txs[0:5]) for i, err := range errs { From 784946a7217810e7298e8d3cc9d6d180e68bf935 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 17:40:50 -0500 Subject: [PATCH 28/46] Austin comments --- plugin/evm/vm_test.go | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 4e126f391d..ea3176d713 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -838,7 +838,7 @@ func testNonCanonicalAccept(t *testing.T, scheme string) { require.NoError(t, err, "Unexpected error parsing block from vm2") require.NoError(t, vm2BlkA.Verify(context.Background()), "Block failed verification on VM2") _, err = vm2.GetBlockIDAtHeight(context.Background(), vm2BlkA.Height()) - require.ErrorIs(t, err, database.ErrNotFound, "Expected unaccepted block not to be indexed by height, but found %s", err) + require.ErrorIs(t, err, database.ErrNotFound) require.NoError(t, vm2.SetPreference(context.Background(), vm2BlkA.ID())) require.NoError(t, vm1BlkA.Accept(context.Background()), "VM1 failed to accept block") @@ -1305,11 +1305,9 @@ func testEmptyBlock(t *testing.T, scheme string) { require.NoError(t, err) _, err = tvm.vm.ParseBlock(context.Background(), emptyBlock.Bytes()) - errStr := err.Error() - require.ErrorIs(t, err, errEmptyBlock, "VM should have failed with errEmptyBlock but got "+errStr) + require.ErrorIs(t, err, errEmptyBlock) verifyErr := emptyBlock.Verify(context.Background()) - errStr = err.Error() - require.ErrorIs(t, verifyErr, errEmptyBlock, "block should have failed verification with errEmptyBlock but got "+errStr) + require.ErrorIs(t, verifyErr, errEmptyBlock) } // Regression test to ensure 
that a VM that verifies block B, C, then @@ -1815,9 +1813,7 @@ func TestTxAllowListSuccessfulTx(t *testing.T) { require.NoError(t, err) errs = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx1}) - expectedErrStr := vmerrors.ErrSenderAddressNotAllowListed.Error() - gotErrStr := errs[0].Error() - require.ErrorIs(t, errs[0], vmerrors.ErrSenderAddressNotAllowListed, "want %s, got %s", expectedErrStr, gotErrStr) + require.ErrorIs(t, errs[0], vmerrors.ErrSenderAddressNotAllowListed) // Submit a rejected transaction, should throw an error because manager is not activated tx2 := types.NewTransaction(uint64(0), managerAddress, big.NewInt(2), 21000, big.NewInt(testMinGasPrice), nil) @@ -1825,9 +1821,7 @@ func TestTxAllowListSuccessfulTx(t *testing.T) { require.NoError(t, err) errs = tvm.vm.txPool.AddRemotesSync([]*types.Transaction{signedTx2}) - expectedErrStr = vmerrors.ErrSenderAddressNotAllowListed.Error() - gotErrStr = errs[0].Error() - require.ErrorIs(t, errs[0], vmerrors.ErrSenderAddressNotAllowListed, "want %s, got %s", expectedErrStr, gotErrStr) + require.ErrorIs(t, errs[0], vmerrors.ErrSenderAddressNotAllowListed) blk := issueAndAccept(t, tvm.vm) newHead := <-newTxPoolHeadChan From fca96c1fc16c660b827a9c3a133d1911e3b3c92e Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 17:46:41 -0500 Subject: [PATCH 29/46] remove confusing error checking --- plugin/evm/vm_test.go | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index ea3176d713..34dfc0501b 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -11,7 +11,6 @@ import ( "math/big" "os" "path/filepath" - "strings" "sync" "testing" "time" @@ -766,13 +765,11 @@ func testReorgProtection(t *testing.T, scheme string) { // with the preferred chain lower than the last finalized block) // should NEVER happen. However, the VM defends against this // just in case. 
- if err := vm1.SetPreference(context.Background(), vm1BlkC.ID()); !strings.Contains(err.Error(), "cannot orphan finalized block") { - require.NoError(t, err, "Unexpected error when setting preference that would trigger reorg") - } + err = vm1.SetPreference(context.Background(), vm1BlkC.ID()) + require.ErrorContains(t, err, "cannot orphan finalized block", "Expected error when setting preference that would orphan finalized block") - if err := vm1BlkC.Accept(context.Background()); !strings.Contains(err.Error(), "expected accepted block to have parent") { - require.NoError(t, err, "Unexpected error when setting block at finalized height") - } + err = vm1BlkC.Accept(context.Background()) + require.ErrorContains(t, err, "expected accepted block to have parent", "Expected error when accepting orphaned block") } // Regression test to ensure that a VM that accepts block C while preferring @@ -1025,8 +1022,8 @@ func testStickyPreference(t *testing.T, scheme string) { blkBHeight := vm1BlkB.Height() blkBHash := vm1BlkB.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock.Hash() - foundBlkBHash := vm1.blockChain.GetBlockByNumber(blkBHeight).Hash() - require.Equal(t, blkBHash, foundBlkBHash, "expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), vm1.blockChain.GetBlockByNumber(blkBHeight).Hash().Hex()) + foundBlkBHash := vm1.blockChain.GetBlockByNumber(blkBHeight).Hash() + require.Equal(t, blkBHash, foundBlkBHash, "expected block at %d to have hash %s but got %s", blkBHeight, blkBHash.Hex(), vm1.blockChain.GetBlockByNumber(blkBHeight).Hash().Hex()) errs = vm2.txPool.AddRemotesSync(txs[0:5]) for i, err := range errs { From 21dca218946e3bf71866f555f69d634cafe70c70 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 17:52:04 -0500 Subject: [PATCH 30/46] austin comments --- plugin/evm/vm_test.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 
34dfc0501b..4b17570280 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -1538,10 +1538,7 @@ func TestTimeSemanticVerify(t *testing.T) { blk, err := tvm.vm.BuildBlock(context.Background()) require.NoError(t, err, "Failed to build block with import transaction") - - if err := blk.Verify(context.Background()); err != nil { - require.NoError(t, err, "Block failed verification on VM") - } + require.NoError(t, blk.Verify(context.Background()), "Block failed verification on VM") // Create empty block from blkA ethBlk := blk.(*chain.BlockWrapper).Block.(*wrappedBlock).ethBlock From 6f634bce779a2cf22f33d0160f10dd6093dafdcb Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 18:02:48 -0500 Subject: [PATCH 31/46] revert --- sync/statesync/code_syncer_test.go | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/sync/statesync/code_syncer_test.go b/sync/statesync/code_syncer_test.go index 13bbccd5ce..e0b0fd23f4 100644 --- a/sync/statesync/code_syncer_test.go +++ b/sync/statesync/code_syncer_test.go @@ -61,19 +61,23 @@ func testCodeSyncer(t *testing.T, test codeSyncerTest) { codeSyncer.start(context.Background()) for _, codeHashes := range test.codeRequestHashes { - require.NoError(t, codeSyncer.addCode(codeHashes)) + if err := codeSyncer.addCode(codeHashes); err != nil { + require.ErrorIs(t, err, test.err) + } } codeSyncer.notifyAccountTrieCompleted() err := <-codeSyncer.Done() - require.ErrorIs(t, err, test.err) + if test.err != nil { + require.ErrorIs(t, err, test.err) + return + } + require.NoError(t, err) - // Assert that the client synced the code correctly only if no error was expected. - if test.err == nil { - for i, codeHash := range codeHashes { - codeBytes := rawdb.ReadCode(clientDB, codeHash) - require.Equal(t, test.codeByteSlices[i], codeBytes) - } + // Assert that the client synced the code correctly. 
+ for i, codeHash := range codeHashes { + codeBytes := rawdb.ReadCode(clientDB, codeHash) + require.Equal(t, test.codeByteSlices[i], codeBytes) } } From b6007a9904f8c50273a016728bec7e1d30539cf8 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 18:06:57 -0500 Subject: [PATCH 32/46] remove out of PR scope change --- core/main_test.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/core/main_test.go b/core/main_test.go index 0ebb8f12a7..bd41436cb1 100644 --- a/core/main_test.go +++ b/core/main_test.go @@ -4,10 +4,8 @@ package core import ( - "os" "testing" - "github.com/ava-labs/libevm/log" "go.uber.org/goleak" "github.com/ava-labs/subnet-evm/params" @@ -22,9 +20,6 @@ func TestMain(m *testing.M) { customtypes.Register() params.RegisterExtras() - // May of these tests are likely to fail due to `log.Crit` in goroutines. - log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelCrit, true))) - opts := []goleak.Option{ // No good way to shut down these goroutines: goleak.IgnoreTopFunction("github.com/ava-labs/subnet-evm/core/state/snapshot.(*diskLayer).generate"), From d002836432c2b21dbb35ec994adcf4f98cd2f46a Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 18:08:50 -0500 Subject: [PATCH 33/46] don't fully revert --- sync/statesync/code_syncer_test.go | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/sync/statesync/code_syncer_test.go b/sync/statesync/code_syncer_test.go index e0b0fd23f4..040853116f 100644 --- a/sync/statesync/code_syncer_test.go +++ b/sync/statesync/code_syncer_test.go @@ -61,18 +61,15 @@ func testCodeSyncer(t *testing.T, test codeSyncerTest) { codeSyncer.start(context.Background()) for _, codeHashes := range test.codeRequestHashes { - if err := codeSyncer.addCode(codeHashes); err != nil { - require.ErrorIs(t, err, test.err) - } + require.ErrorIs(t, codeSyncer.addCode(codeHashes), test.err) } codeSyncer.notifyAccountTrieCompleted() err := 
<-codeSyncer.Done() + require.ErrorIs(t, err, test.err) if test.err != nil { - require.ErrorIs(t, err, test.err) return } - require.NoError(t, err) // Assert that the client synced the code correctly. for i, codeHash := range codeHashes { From f3a60b6fab07512501d4f921a7d71bd322264142 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 18:13:29 -0500 Subject: [PATCH 34/46] lint --- sync/statesync/code_syncer_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sync/statesync/code_syncer_test.go b/sync/statesync/code_syncer_test.go index 040853116f..2d0b26a371 100644 --- a/sync/statesync/code_syncer_test.go +++ b/sync/statesync/code_syncer_test.go @@ -61,7 +61,8 @@ func testCodeSyncer(t *testing.T, test codeSyncerTest) { codeSyncer.start(context.Background()) for _, codeHashes := range test.codeRequestHashes { - require.ErrorIs(t, codeSyncer.addCode(codeHashes), test.err) + err := codeSyncer.addCode(codeHashes) + require.ErrorIs(t, err, test.err) } codeSyncer.notifyAccountTrieCompleted() From 6f63d49bf7dedf6ed83cd59d23c949643ef30b67 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Mon, 17 Nov 2025 18:28:06 -0500 Subject: [PATCH 35/46] require no error --- sync/statesync/code_syncer_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sync/statesync/code_syncer_test.go b/sync/statesync/code_syncer_test.go index 2d0b26a371..14f69f27c2 100644 --- a/sync/statesync/code_syncer_test.go +++ b/sync/statesync/code_syncer_test.go @@ -62,7 +62,7 @@ func testCodeSyncer(t *testing.T, test codeSyncerTest) { for _, codeHashes := range test.codeRequestHashes { err := codeSyncer.addCode(codeHashes) - require.ErrorIs(t, err, test.err) + require.NoError(t, err) } codeSyncer.notifyAccountTrieCompleted() From f99591af470ea4f737c9697a5f0f86e462ad90e2 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Tue, 18 Nov 2025 10:34:17 -0500 Subject: [PATCH 36/46] add blockchain stop back --- 
core/blockchain_ext_test.go | 2 ++ sync/statesync/code_syncer_test.go | 3 +-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/core/blockchain_ext_test.go b/core/blockchain_ext_test.go index e4ab5942f2..d16a84d1c2 100644 --- a/core/blockchain_ext_test.go +++ b/core/blockchain_ext_test.go @@ -215,6 +215,8 @@ func checkBlockChainState( newChainDataDir := copyFlatDir(t, oldChainDataDir) restartedChain, err := create(originalDB, gspec, lastAcceptedBlock.Hash(), newChainDataDir) require.NoError(err) + defer restartedChain.Stop() + currentBlock := restartedChain.CurrentBlock() require.Equal(lastAcceptedBlock.Hash(), currentBlock.Hash(), "Restarted chain's current block does not match last accepted block") restartedLastAcceptedBlock := restartedChain.LastConsensusAcceptedBlock() diff --git a/sync/statesync/code_syncer_test.go b/sync/statesync/code_syncer_test.go index 14f69f27c2..2994eabb19 100644 --- a/sync/statesync/code_syncer_test.go +++ b/sync/statesync/code_syncer_test.go @@ -61,8 +61,7 @@ func testCodeSyncer(t *testing.T, test codeSyncerTest) { codeSyncer.start(context.Background()) for _, codeHashes := range test.codeRequestHashes { - err := codeSyncer.addCode(codeHashes) - require.NoError(t, err) + require.NoError(t, codeSyncer.addCode(codeHashes)) } codeSyncer.notifyAccountTrieCompleted() From 6743707799d2c3fba65e27d7601c67f974cc50a6 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Tue, 18 Nov 2025 13:49:24 -0500 Subject: [PATCH 37/46] use binint.cmp --- core/blockchain_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 93739a77f0..ec98e3c2b2 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -1003,11 +1003,11 @@ func TestEIP3651(t *testing.T) { tx := block.Transactions()[0] gasPrice := new(big.Int).Add(block.BaseFee(), tx.EffectiveGasTipValue(block.BaseFee())) expected := new(big.Int).SetUint64(block.GasUsed() * gasPrice.Uint64()) - 
require.Equal(t, expected, actual, "miner balance incorrect: expected %d, got %d", expected, actual) + require.Zero(t, actual.Cmp(expected), "miner balance incorrect: expected %d, got %d", expected, actual) // 4: Ensure the tx sender paid for the gasUsed * (block baseFee + effectiveGasTip). // Note this differs from go-ethereum where the miner receives the gasUsed * block baseFee, // as our handling of the coinbase payment is different. actual = new(big.Int).Sub(funds, state.GetBalance(addr1).ToBig()) - require.Equal(t, expected, actual, "sender balance incorrect: expected %d, got %d", expected, actual) + require.Zero(t, actual.Cmp(expected), "sender balance incorrect: expected %d, got %d", expected, actual) } From 25db6a9850bc263574b2f6d5a89f283499905c81 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Wed, 19 Nov 2025 16:47:10 -0500 Subject: [PATCH 38/46] restore blockcahin test --- core/blockchain_test.go | 142 ++++++++++++++++++++++++++++------------ 1 file changed, 99 insertions(+), 43 deletions(-) diff --git a/core/blockchain_test.go b/core/blockchain_test.go index ec98e3c2b2..12d55c8c81 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -17,7 +17,6 @@ import ( "github.com/ava-labs/libevm/crypto" "github.com/ava-labs/libevm/eth/tracers/logger" "github.com/ava-labs/libevm/ethdb" - "github.com/stretchr/testify/require" "github.com/ava-labs/subnet-evm/consensus/dummy" "github.com/ava-labs/subnet-evm/core/state/pruner" @@ -354,7 +353,9 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { } blockchain, err := createBlockChain(chainDB, pruningConfig, gspec, common.Hash{}) - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } defer blockchain.Stop() // This call generates a chain of 3 blocks. 
@@ -363,12 +364,17 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), ethparams.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) - require.NoError(t, err) + if err != nil { + t.Fatal(err) + } - _, err = blockchain.InsertChain(chain) - require.NoError(t, err) + if _, err := blockchain.InsertChain(chain); err != nil { + t.Fatal(err) + } for _, block := range chain { - require.NoError(t, blockchain.Accept(block), "failed to accept block %d", block.NumberU64()) + if err := blockchain.Accept(block); err != nil { + t.Fatal(err) + } } blockchain.DrainAcceptorQueue() @@ -376,11 +382,15 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { blockchain.Stop() blockchain, err = createBlockChain(chainDB, pruningConfig, gspec, lastAcceptedHash) - require.NoError(t, err, "failed to create blockchain") + if err != nil { + t.Fatal(err) + } // Confirm that the node does not have the state for intermediate nodes (exclude the last accepted block) for _, block := range chain[:len(chain)-1] { - require.False(t, blockchain.HasState(block.Root()), "Expected blockchain to be missing state for intermediate block %d with pruning enabled", block.NumberU64()) + if blockchain.HasState(block.Root()) { + t.Fatalf("Expected blockchain to be missing state for intermediate block %d with pruning enabled", block.NumberU64()) + } } blockchain.Stop() @@ -402,11 +412,15 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { gspec, lastAcceptedHash, ) - require.NoError(t, err, "failed to create blockchain") + if err != nil { + t.Fatal(err) + } defer blockchain.Stop() for _, block := range chain { - require.True(t, blockchain.HasState(block.Root()), "Expected blockchain to have state for block %d", block.NumberU64()) + if !blockchain.HasState(block.Root()) { + t.Fatalf("failed to re-generate state for block %d", block.NumberU64()) + } } } @@ -521,27 
+535,40 @@ func testCanonicalHashMarker(t *testing.T, scheme string) { engine = dummy.NewCoinbaseFaker() ) _, forkA, _, err := GenerateChainWithGenesis(gspec, engine, c.forkA, 10, func(int, *BlockGen) {}) - require.NoError(t, err, "failed to generate chain A") + if err != nil { + t.Fatal(err) + } _, forkB, _, err := GenerateChainWithGenesis(gspec, engine, c.forkB, 10, func(int, *BlockGen) {}) - require.NoError(t, err, "failed to generate chain B") + if err != nil { + t.Fatal(err) + } // Initialize test chain db := rawdb.NewMemoryDatabase() cacheConfig := DefaultCacheConfigWithScheme(scheme) cacheConfig.ChainDataDir = t.TempDir() chain, err := NewBlockChain(db, cacheConfig, gspec, engine, vm.Config{}, common.Hash{}, false) - require.NoError(t, err, "failed to create tester chain: %v", err) - + if err != nil { + t.Fatalf("failed to create tester chain: %v", err) + } // Insert forkA and forkB, the canonical should on forkA still - n, err := chain.InsertChain(forkA) - require.NoError(t, err, "block %d: failed to insert into chain: %v", n, err) - n, err = chain.InsertChain(forkB) - require.NoError(t, err, "block %d: failed to insert into chain: %v", n, err) + if n, err := chain.InsertChain(forkA); err != nil { + t.Fatalf("block %d: failed to insert into chain: %v", n, err) + } + if n, err := chain.InsertChain(forkB); err != nil { + t.Fatalf("block %d: failed to insert into chain: %v", n, err) + } verify := func(head *types.Block) { - require.Equal(t, head.Hash(), chain.CurrentBlock().Hash(), "Unexpected block hash, want %x, got %x", head.Hash(), chain.CurrentBlock().Hash()) - require.Equal(t, head.Hash(), chain.CurrentHeader().Hash(), "Unexpected head header, want %x, got %x", head.Hash(), chain.CurrentHeader().Hash()) - require.True(t, chain.HasState(head.Root()), "Lost block state %v %x", head.Number(), head.Hash()) + if chain.CurrentBlock().Hash() != head.Hash() { + t.Fatalf("Unexpected block hash, want %x, got %x", head.Hash(), chain.CurrentBlock().Hash()) + } + if 
chain.CurrentHeader().Hash() != head.Hash() { + t.Fatalf("Unexpected head header, want %x, got %x", head.Hash(), chain.CurrentHeader().Hash()) + } + if !chain.HasState(head.Root()) { + t.Fatalf("Lost block state %v %x", head.Number(), head.Hash()) + } } // Switch canonical chain to forkB if necessary @@ -549,7 +576,9 @@ func testCanonicalHashMarker(t *testing.T, scheme string) { verify(forkB[len(forkB)-1]) } else { verify(forkA[len(forkA)-1]) - require.NoError(t, chain.SetPreference(forkB[len(forkB)-1])) + if err := chain.SetPreference(forkB[len(forkB)-1]); err != nil { + t.Fatal(err) + } verify(forkB[len(forkB)-1]) } @@ -557,12 +586,16 @@ func testCanonicalHashMarker(t *testing.T, scheme string) { for i := 0; i < len(forkB); i++ { block := forkB[i] hash := chain.GetCanonicalHash(block.NumberU64()) - require.Equal(t, block.Hash(), hash, "Unexpected canonical hash %d", block.NumberU64()) + if hash != block.Hash() { + t.Fatalf("Unexpected canonical hash %d", block.NumberU64()) + } } if c.forkA > c.forkB { for i := uint64(c.forkB) + 1; i <= uint64(c.forkA); i++ { hash := chain.GetCanonicalHash(i) - require.Zero(t, hash, "Unexpected canonical hash %d", i) + if hash != (common.Hash{}) { + t.Fatalf("Unexpected canonical hash %d", i) + } } } chain.Stop() @@ -703,12 +736,15 @@ func testCreateThenDelete(t *testing.T, config *params.ChainConfig) { // Debug: true, // Tracer: logger.NewJSONLogger(nil, os.Stdout), }, common.Hash{}, false) - require.NoError(t, err, "failed to create tester chain: %v", err) + if err != nil { + t.Fatalf("failed to create tester chain: %v", err) + } defer chain.Stop() // Import the blocks for _, block := range blocks { - _, err = chain.InsertChain([]*types.Block{block}) - require.NoError(t, err, "block %d: failed to insert into chain: %v", block.NumberU64(), err) + if _, err := chain.InsertChain([]*types.Block{block}); err != nil { + t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err) + } } } @@ -810,15 +846,19 @@ func 
TestDeleteThenCreate(t *testing.T) { nonce++ } }) - require.NoError(t, err, "failed to generate chain: %v", err) - + if err != nil { + t.Fatal(err) + } // Import the canonical chain chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfig, gspec, engine, vm.Config{}, common.Hash{}, false) - require.NoError(t, err, "failed to create tester chain: %v", err) + if err != nil { + t.Fatalf("failed to create tester chain: %v", err) + } defer chain.Stop() for _, block := range blocks { - _, err = chain.InsertChain([]*types.Block{block}) - require.NoError(t, err, "block %d: failed to insert into chain: %v", block.NumberU64(), err) + if _, err := chain.InsertChain([]*types.Block{block}); err != nil { + t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err) + } } } @@ -898,17 +938,24 @@ func TestTransientStorageReset(t *testing.T) { // Initialize the blockchain with 1153 enabled. chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfig, gspec, engine, vmConfig, common.Hash{}, false) - require.NoError(t, err, "failed to create tester chain: %v", err) + if err != nil { + t.Fatalf("failed to create tester chain: %v", err) + } defer chain.Stop() // Import the blocks - _, err = chain.InsertChain(blocks) - require.NoError(t, err, "failed to insert into chain: %v", err) + if _, err := chain.InsertChain(blocks); err != nil { + t.Fatalf("failed to insert into chain: %v", err) + } // Check the storage state, err := chain.StateAt(chain.CurrentHeader().Root) - require.NoError(t, err, "failed to load state: %v", err) + if err != nil { + t.Fatalf("Failed to load state %v", err) + } loc := common.BytesToHash([]byte{1}) slot := state.GetState(destAddress, loc) - require.Zero(t, slot, "Unexpected dirty storage slot") + if slot != (common.Hash{}) { + t.Fatalf("Unexpected dirty storage slot") + } } func TestEIP3651(t *testing.T) { @@ -981,17 +1028,22 @@ func TestEIP3651(t *testing.T) { b.AddTx(tx) }) chain, err := 
NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfig, gspec, engine, vm.Config{Tracer: logger.NewMarkdownLogger(&logger.Config{}, os.Stderr)}, common.Hash{}, false) - require.NoError(t, err, "failed to create tester chain: %v", err) + if err != nil { + t.Fatalf("failed to create tester chain: %v", err) + } defer chain.Stop() - _, err = chain.InsertChain(blocks) - require.NoError(t, err, "failed to insert into chain: %v", err) + if n, err := chain.InsertChain(blocks); err != nil { + t.Fatalf("block %d: failed to insert into chain: %v", n, err) + } block := chain.GetBlockByNumber(1) // 1+2: Ensure EIP-1559 access lists are accounted for via gas usage. innerGas := vm.GasQuickStep*2 + ethparams.ColdSloadCostEIP2929*2 expectedGas := ethparams.TxGas + 5*vm.GasFastestStep + vm.GasQuickStep + 100 + innerGas // 100 because 0xaaaa is in access list - require.Equal(t, expectedGas, block.GasUsed(), "incorrect amount of gas spent: expected %d, got %d", expectedGas, block.GasUsed()) + if block.GasUsed() != expectedGas { + t.Fatalf("incorrect amount of gas spent: expected %d, got %d", expectedGas, block.GasUsed()) + } state, _ := chain.State() @@ -1003,11 +1055,15 @@ func TestEIP3651(t *testing.T) { tx := block.Transactions()[0] gasPrice := new(big.Int).Add(block.BaseFee(), tx.EffectiveGasTipValue(block.BaseFee())) expected := new(big.Int).SetUint64(block.GasUsed() * gasPrice.Uint64()) - require.Zero(t, actual.Cmp(expected), "miner balance incorrect: expected %d, got %d", expected, actual) + if actual.Cmp(expected) != 0 { + t.Fatalf("miner balance incorrect: expected %d, got %d", expected, actual) + } // 4: Ensure the tx sender paid for the gasUsed * (block baseFee + effectiveGasTip). // Note this differs from go-ethereum where the miner receives the gasUsed * block baseFee, // as our handling of the coinbase payment is different. 
actual = new(big.Int).Sub(funds, state.GetBalance(addr1).ToBig()) - require.Zero(t, actual.Cmp(expected), "sender balance incorrect: expected %d, got %d", expected, actual) + if actual.Cmp(expected) != 0 { + t.Fatalf("sender balance incorrect: expected %d, got %d", expected, actual) + } } From d54febda30a12587d8f57f016d28e32fcee46e2d Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Thu, 20 Nov 2025 10:28:40 -0500 Subject: [PATCH 39/46] remove exception --- scripts/upstream_files.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts/upstream_files.txt b/scripts/upstream_files.txt index 55f87092ab..595c60d727 100644 --- a/scripts/upstream_files.txt +++ b/scripts/upstream_files.txt @@ -32,7 +32,6 @@ triedb/* !core/blockchain_ext.go !core/blockchain_ext_test.go !core/blockchain_log_test.go -!core/blockchain_test.go !core/bounded_buffer.go !core/coretest/* !core/extstate/* From 8ce8366c9154ec744126dbc14c776a723da6c693 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Fri, 21 Nov 2025 13:28:08 -0500 Subject: [PATCH 40/46] Cey feedback --- core/state/trie_prefetcher_extra_test.go | 2 +- plugin/evm/version_test.go | 10 +--------- .../allowlist/allowlisttest/test_allowlist_config.go | 6 ++---- 3 files changed, 4 insertions(+), 14 deletions(-) diff --git a/core/state/trie_prefetcher_extra_test.go b/core/state/trie_prefetcher_extra_test.go index dd677c9c6b..d312b98d4d 100644 --- a/core/state/trie_prefetcher_extra_test.go +++ b/core/state/trie_prefetcher_extra_test.go @@ -123,7 +123,7 @@ func BenchmarkPrefetcherDatabase(b *testing.B) { commit(levelDB, snaps, db) b.Logf("Root: %v, kvs: %d, block: %d (committed)", root, count, block) } - require.Equal(root, previous, "root and previous should be equal") + require.NotEqual(root, previous, "root and previous should not be equal") require.NoError(db.TrieDB().Dereference(previous)) } require.NoError(levelDB.Close()) diff --git a/plugin/evm/version_test.go b/plugin/evm/version_test.go index 
ecbb093f55..8b61d7e635 100644 --- a/plugin/evm/version_test.go +++ b/plugin/evm/version_test.go @@ -6,10 +6,8 @@ package evm import ( "encoding/json" "os" - "path/filepath" "testing" - "github.com/ava-labs/avalanchego/version" "github.com/stretchr/testify/require" ) @@ -26,11 +24,5 @@ func TestCompatibility(t *testing.T) { var parsedCompat rpcChainCompatibility err = json.Unmarshal(compat, &parsedCompat) require.NoError(t, err, "json decoding compatibility file") - - rpcChainVMVersion, valueInJSON := parsedCompat.RPCChainVMProtocolVersion[Version] - require.True(t, valueInJSON, "%s has subnet-evm version %s missing from rpcChainVMProtocolVersion object", - filepath.Base(compatibilityFile), Version) - require.Equal(t, version.RPCChainVMProtocol, rpcChainVMVersion, - "%s has subnet-evm version %s stated as compatible with RPC chain VM protocol version %d but AvalancheGo protocol version is %d", - filepath.Base(compatibilityFile), Version, rpcChainVMVersion, version.RPCChainVMProtocol) + require.Contains(t, parsedCompat.RPCChainVMProtocolVersion, Version, "subnet-evm version %s missing from rpcChainVMProtocolVersion object", Version) } diff --git a/precompile/allowlist/allowlisttest/test_allowlist_config.go b/precompile/allowlist/allowlisttest/test_allowlist_config.go index 3c45bb1891..8738db0d40 100644 --- a/precompile/allowlist/allowlisttest/test_allowlist_config.go +++ b/precompile/allowlist/allowlisttest/test_allowlist_config.go @@ -212,8 +212,7 @@ func VerifyPrecompileWithAllowListTests(t *testing.T, module modules.Module, ver tests := AllowListConfigVerifyTests(t, module) // Add the contract specific tests to the map of tests to run. 
for name, test := range verifyTests { - _, exists := tests[name] - require.False(t, exists, "duplicate test name: %s", name) + require.NotContains(t, tests, name, "duplicate test name: %s", name) tests[name] = test } @@ -225,8 +224,7 @@ func EqualPrecompileWithAllowListTests(t *testing.T, module modules.Module, equa tests := AllowListConfigEqualTests(t, module) // Add the contract specific tests to the map of tests to run. for name, test := range equalTests { - _, exists := tests[name] - require.False(t, exists, "duplicate test name: %s", name) + require.NotContains(t, tests, name, "duplicate test name: %s", name) tests[name] = test } From 616638700bf353b09fe86801bfa89f89a6bf691e Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Fri, 21 Nov 2025 13:30:52 -0500 Subject: [PATCH 41/46] Update params/extras/precompile_upgrade_test.go Co-authored-by: Ceyhun Onur Signed-off-by: Jonathan Oppenheimer <147infiniti@gmail.com> --- params/extras/precompile_upgrade_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/params/extras/precompile_upgrade_test.go b/params/extras/precompile_upgrade_test.go index 4256683442..6ad4471c49 100644 --- a/params/extras/precompile_upgrade_test.go +++ b/params/extras/precompile_upgrade_test.go @@ -288,7 +288,7 @@ func (tt *upgradeCompatibilityTest) run(t *testing.T, chainConfig ChainConfig) { // if this is not the final upgradeBytes, continue applying // the next upgradeBytes. 
(only check the result on the last apply) if i != len(tt.configs)-1 { - require.Nil(t, err, "expecting checkConfigCompatible call %d to return nil", i+1) + require.NoErrorf(t, err, "expecting checkConfigCompatible call %d to return nil", i+1) chainConfig = newCfg continue } From 1f5bcec72a5fbbc9c1422d58947df599198449ef Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Fri, 21 Nov 2025 13:33:31 -0500 Subject: [PATCH 42/46] Cey feedback --- params/extras/precompile_upgrade_test.go | 4 ++-- sync/client/client_test.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/params/extras/precompile_upgrade_test.go b/params/extras/precompile_upgrade_test.go index 6ad4471c49..dea3ef2ab8 100644 --- a/params/extras/precompile_upgrade_test.go +++ b/params/extras/precompile_upgrade_test.go @@ -288,7 +288,7 @@ func (tt *upgradeCompatibilityTest) run(t *testing.T, chainConfig ChainConfig) { // if this is not the final upgradeBytes, continue applying // the next upgradeBytes. (only check the result on the last apply) if i != len(tt.configs)-1 { - require.NoErrorf(t, err, "expecting checkConfigCompatible call %d to return nil", i+1) + require.NoError(t, err, "expecting checkConfigCompatible call %d to return nil", i+1) chainConfig = newCfg continue } @@ -296,7 +296,7 @@ func (tt *upgradeCompatibilityTest) run(t *testing.T, chainConfig ChainConfig) { if tt.expectedErrorString != "" { require.ErrorContains(t, err, tt.expectedErrorString) } else { - require.Nil(t, err) + require.NoError(t, err, "expecting checkConfigCompatible call %d to return nil", i+1) } } } diff --git a/sync/client/client_test.go b/sync/client/client_test.go index 104f6aea02..83a7573a5d 100644 --- a/sync/client/client_test.go +++ b/sync/client/client_test.go @@ -665,8 +665,8 @@ func TestGetLeafs(t *testing.T) { return } - leafsResponse, ok := response.(message.LeafsResponse) - require.True(t, ok, "expected leafs response") + leafsResponse := response.(message.LeafsResponse) + 
require.IsType(t, message.LeafsResponse{}, leafsResponse, "parseLeafsResponse returned incorrect type %T", leafsResponse) test.requireResponse(t, leafsResponse) }) } From 7bf4819e8d901f1cd25ee706cca586bc7d1a8df5 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Fri, 21 Nov 2025 14:35:21 -0500 Subject: [PATCH 43/46] Revert to nil --- params/extras/precompile_upgrade_test.go | 4 ++-- sync/client/client_test.go | 10 +++++++++- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/params/extras/precompile_upgrade_test.go b/params/extras/precompile_upgrade_test.go index dea3ef2ab8..8e00e75022 100644 --- a/params/extras/precompile_upgrade_test.go +++ b/params/extras/precompile_upgrade_test.go @@ -288,7 +288,7 @@ func (tt *upgradeCompatibilityTest) run(t *testing.T, chainConfig ChainConfig) { // if this is not the final upgradeBytes, continue applying // the next upgradeBytes. (only check the result on the last apply) if i != len(tt.configs)-1 { - require.NoError(t, err, "expecting checkConfigCompatible call %d to return nil", i+1) + require.Nil(t, err, "expecting checkConfigCompatible call %d to return nil", i+1) chainConfig = newCfg continue } @@ -296,7 +296,7 @@ func (tt *upgradeCompatibilityTest) run(t *testing.T, chainConfig ChainConfig) { if tt.expectedErrorString != "" { require.ErrorContains(t, err, tt.expectedErrorString) } else { - require.NoError(t, err, "expecting checkConfigCompatible call %d to return nil", i+1) + require.Nil(t, err, "expecting checkConfigCompatible call %d to return nil", i+1) } } } diff --git a/sync/client/client_test.go b/sync/client/client_test.go index 04613c3b98..2a89c7d895 100644 --- a/sync/client/client_test.go +++ b/sync/client/client_test.go @@ -571,7 +571,15 @@ func TestGetLeafs(t *testing.T) { response, err := handler.OnLeafsRequest(t.Context(), ids.GenerateTestNodeID(), 1, request) require.NoError(t, err) require.NotEmpty(t, response) - return response + var leafResponse message.LeafsResponse + _, err = 
message.Codec.Unmarshal(response, &leafResponse) + require.NoError(t, err) + leafResponse.Keys = leafResponse.Keys[:len(leafResponse.Keys)-1] + leafResponse.Vals = leafResponse.Vals[:len(leafResponse.Vals)-1] + + modifiedResponse, err := message.Codec.Marshal(message.Version, leafResponse) + require.NoError(t, err) + return modifiedResponse }, expectedErr: errInvalidRangeProof, }, From 5883f1d0356d2527444be63904439e8e33f305b4 Mon Sep 17 00:00:00 2001 From: Jonathan Oppenheimer Date: Fri, 21 Nov 2025 21:54:38 -0500 Subject: [PATCH 44/46] chore: lint --- plugin/evm/vm_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 290f130cac..2679d4c047 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -7,11 +7,11 @@ import ( "context" "crypto/ecdsa" "encoding/json" + "errors" "fmt" "math/big" "os" "path/filepath" - "sync" "testing" "time" From 53490cfd44ceee7bf1eb8a07d2815b54f2356d41 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 24 Nov 2025 15:13:06 +0300 Subject: [PATCH 45/46] revert version_test suggestion --- plugin/evm/version_test.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/plugin/evm/version_test.go b/plugin/evm/version_test.go index 8b61d7e635..f9f07ef9f8 100644 --- a/plugin/evm/version_test.go +++ b/plugin/evm/version_test.go @@ -6,8 +6,10 @@ package evm import ( "encoding/json" "os" + "path/filepath" "testing" + "github.com/ava-labs/avalanchego/version" "github.com/stretchr/testify/require" ) @@ -23,6 +25,10 @@ func TestCompatibility(t *testing.T) { var parsedCompat rpcChainCompatibility err = json.Unmarshal(compat, &parsedCompat) - require.NoError(t, err, "json decoding compatibility file") - require.Contains(t, parsedCompat.RPCChainVMProtocolVersion, Version, "subnet-evm version %s missing from rpcChainVMProtocolVersion object", Version) + rpcChainVMVersion, valueInJSON := parsedCompat.RPCChainVMProtocolVersion[Version] + 
require.True(t, valueInJSON, "%s has subnet-evm version %s missing from rpcChainVMProtocolVersion object", + filepath.Base(compatibilityFile), Version) + require.Equal(t, version.RPCChainVMProtocol, rpcChainVMVersion, + "%s has subnet-evm version %s stated as compatible with RPC chain VM protocol version %d but AvalancheGo protocol version is %d", + filepath.Base(compatibilityFile), Version, rpcChainVMVersion, version.RPCChainVMProtocol) } From e080ed1e8eb7e664eb8a01097eaecba136b81499 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 24 Nov 2025 17:14:58 +0300 Subject: [PATCH 46/46] fix linter --- plugin/evm/version_test.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/plugin/evm/version_test.go b/plugin/evm/version_test.go index f9f07ef9f8..c795b738c5 100644 --- a/plugin/evm/version_test.go +++ b/plugin/evm/version_test.go @@ -25,10 +25,12 @@ func TestCompatibility(t *testing.T) { var parsedCompat rpcChainCompatibility err = json.Unmarshal(compat, &parsedCompat) + require.NoError(t, err, "json decoding compatibility file") + rpcChainVMVersion, valueInJSON := parsedCompat.RPCChainVMProtocolVersion[Version] - require.True(t, valueInJSON, "%s has subnet-evm version %s missing from rpcChainVMProtocolVersion object", + require.Truef(t, valueInJSON, "%s has subnet-evm version %s missing from rpcChainVMProtocolVersion object", filepath.Base(compatibilityFile), Version) - require.Equal(t, version.RPCChainVMProtocol, rpcChainVMVersion, + require.Equalf(t, version.RPCChainVMProtocol, rpcChainVMVersion, "%s has subnet-evm version %s stated as compatible with RPC chain VM protocol version %d but AvalancheGo protocol version is %d", filepath.Base(compatibilityFile), Version, rpcChainVMVersion, version.RPCChainVMProtocol) }