diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cf7bc2e42684..8643fe57dba1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -110,15 +110,16 @@ jobs: - name: Build AvalancheGo Binary shell: bash run: ./scripts/build.sh - - name: Run e2e tests - uses: ./.github/actions/run-monitored-tmpnet-cmd - with: - run: ./scripts/tests.upgrade.sh - artifact_prefix: upgrade - prometheus_username: ${{ secrets.PROMETHEUS_ID || '' }} - prometheus_password: ${{ secrets.PROMETHEUS_PASSWORD || '' }} - loki_username: ${{ secrets.LOKI_ID || '' }} - loki_password: ${{ secrets.LOKI_PASSWORD || '' }} + # TODO: Reactivate test once v1.13.0 is published + # - name: Run e2e tests + # uses: ./.github/actions/run-monitored-tmpnet-cmd + # with: + # run: ./scripts/tests.upgrade.sh + # artifact_prefix: upgrade + # prometheus_username: ${{ secrets.PROMETHEUS_ID || '' }} + # prometheus_password: ${{ secrets.PROMETHEUS_PASSWORD || '' }} + # loki_username: ${{ secrets.LOKI_ID || '' }} + # loki_password: ${{ secrets.LOKI_PASSWORD || '' }} Lint: runs-on: ubuntu-latest steps: diff --git a/RELEASES.md b/RELEASES.md index b31759b1f900..8896751c3d91 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -1,13 +1,28 @@ # Release Notes -## [v1.12.3] (pending) +## [v1.13.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.13.0) -- Extended the network health check by also alerting if a primary network validator has no nodes connected to it. Runs a configurable time after startup or 10 minutes by default. +This upgrade consists of the following Avalanche Community Proposal (ACP): +- [ACP-176](https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/176-dynamic-evm-gas-limit-and-price-discovery-updates/README.md) Dynamic EVM Gas Limits and Price Discovery Updates + +The ACP in this upgrade goes into effect at 11 AM ET (3 PM UTC) on Tuesday, April 8th, 2025 on Mainnet. + +**All Fortuna-supporting Mainnet nodes should upgrade before 11 AM ET on April 8th, 2025.** + +The plugin version is unchanged at `39` and is compatible with version `v1.12.2`. + +### APIs + +- Added a ProposerVM block timestamp metric: `avalanche_proposervm_last_accepted_timestamp` +- Added a network health check that alerts if a primary network validator has no ingress connections. The check starts a configurable duration after startup, 10 minutes by default. ### Configs -- How long after startup the aforementioned health check runs can be configured via: -`--network-no-ingress-connections-grace-period` +- Added: + - `--proposervm-min-block-duration` + - `--network-no-ingress-connections-grace-period` to configure how long after startup a Mainnet validator is expected to have received an ingress connection.
+ +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.12.2...v1.13.0 ## [v1.12.2](https://github.com/ava-labs/avalanchego/releases/tag/v1.12.2) diff --git a/api/server/server.go b/api/server/server.go index 8af570d09bdd..d2a56b757506 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -179,7 +179,7 @@ func (s *server) RegisterChain(chainName string, ctx *snow.ConsensusContext, vm return } - s.log.Verbo("about to add API endpoints", + s.log.Info("adding API endpoints", zap.Stringer("chainID", ctx.ChainID), ) // all subroutes to a chain begin with "bc/" diff --git a/app/app.go b/app/app.go index 8ac3798ea8b6..f9a947155573 100644 --- a/app/app.go +++ b/app/app.go @@ -74,7 +74,9 @@ func New(config nodeconfig.Config) (App, error) { n, err := node.New(&config, logFactory, log) if err != nil { - log.Fatal("failed to initialize node", zap.Error(err)) + log.Fatal("failed to initialize node", + zap.Error(err), + ) log.Stop() logFactory.Close() return nil, fmt.Errorf("failed to initialize node: %w", err) diff --git a/chains/manager.go b/chains/manager.go index f772dfe73996..48bf6879be4f 100644 --- a/chains/manager.go +++ b/chains/manager.go @@ -348,7 +348,7 @@ func New(config *ManagerConfig) (Manager, error) { // Invariant: Tracked Subnet must be checked before calling this function func (m *manager) QueueChainCreation(chainParams ChainParameters) { if sb, _ := m.Subnets.GetOrCreate(chainParams.SubnetID); !sb.AddChain(chainParams.ID) { - m.Log.Debug("skipping chain creation", + m.Log.Warn("skipping chain creation", zap.String("reason", "chain already staged"), zap.Stringer("subnetID", chainParams.SubnetID), zap.Stringer("chainID", chainParams.ID), @@ -358,7 +358,7 @@ func (m *manager) QueueChainCreation(chainParams ChainParameters) { } if ok := m.chainsQueue.PushRight(chainParams); !ok { - m.Log.Warn("skipping chain creation", + m.Log.Error("skipping chain creation", zap.String("reason", "couldn't enqueue chain"), zap.Stringer("subnetID", chainParams.SubnetID), zap.Stringer("chainID", chainParams.ID), diff --git a/database/pebbledb/db.go b/database/pebbledb/db.go index 8d4080bb8832..c76e9f226c9f 100644 --- a/database/pebbledb/db.go +++ b/database/pebbledb/db.go @@ -87,8 +87,7 @@ func New(file string, configBytes []byte, log logging.Logger, _ prometheus.Regis } opts.Experimental.ReadSamplingMultiplier = -1 // Disable seek compaction - log.Info( - "opening pebble", + log.Info("opening pebble", zap.Reflect("config", cfg), ) diff --git a/nat/nat.go b/nat/nat.go index 28cdb1083eac..e16c92f1a51e 100644 --- a/nat/nat.go +++ b/nat/nat.go @@ -190,5 +190,5 @@ func (m *Mapper) updateIP(ip *utils.Atomic[netip.AddrPort]) { func (m *Mapper) UnmapAllPorts() { close(m.closer) m.wg.Wait() - m.log.Info("Unmapped all ports") + m.log.Info("unmapped all ports") } diff --git a/network/dialer/dialer.go b/network/dialer/dialer.go index 2517184fedcc..e1bce3ff279e 100644 --- a/network/dialer/dialer.go +++ b/network/dialer/dialer.go @@ -49,8 +49,7 @@ func NewDialer(network string, dialerConfig Config, log logging.Logger) Dialer { } else { throttler = throttling.NewDialThrottler(int(dialerConfig.ThrottleRps)) } - log.Debug( - "creating dialer", + log.Info("creating dialer", zap.Uint32("throttleRPS", dialerConfig.ThrottleRps), zap.Duration("dialTimeout", dialerConfig.ConnectionTimeout), ) diff --git a/network/example_test.go b/network/example_test.go index 4aaa8aa9ed07..c1c17be51791 100644 --- a/network/example_test.go +++ b/network/example_test.go @@ -36,15 +36,13 @@ type testExternalHandler 
struct { // called by messages explicitly sent by the peer. If timeouts are required, // that must be handled by the user of this utility. func (t *testExternalHandler) HandleInbound(_ context.Context, message message.InboundMessage) { - t.log.Info( - "receiving message", + t.log.Info("receiving message", zap.Stringer("op", message.Op()), ) } func (t *testExternalHandler) Connected(nodeID ids.NodeID, version *version.Application, subnetID ids.ID) { - t.log.Info( - "connected", + t.log.Info("connected", zap.Stringer("nodeID", nodeID), zap.Stringer("version", version), zap.Stringer("subnetID", subnetID), @@ -52,8 +50,7 @@ func (t *testExternalHandler) Connected(nodeID ids.NodeID, version *version.Appl } func (t *testExternalHandler) Disconnected(nodeID ids.NodeID) { - t.log.Info( - "disconnected", + t.log.Info("disconnected", zap.Stringer("nodeID", nodeID), ) } @@ -98,8 +95,7 @@ func ExampleNewTestNetwork() { trackedSubnets, ) if err != nil { - log.Fatal( - "failed to create test network config", + log.Fatal("failed to create test network config", zap.Error(err), ) return @@ -111,8 +107,7 @@ func ExampleNewTestNetwork() { handler, ) if err != nil { - log.Fatal( - "failed to create test network", + log.Fatal("failed to create test network", zap.Error(err), ) return @@ -138,8 +133,7 @@ func ExampleNewTestNetwork() { // Calling network.Dispatch() will block until a fatal error occurs or // network.StartClose() is called. err = network.Dispatch() - log.Info( - "network exited", + log.Info("network exited", zap.Error(err), ) } diff --git a/network/network.go b/network/network.go index 09c3425d28ce..2f7b49b19251 100644 --- a/network/network.go +++ b/network/network.go @@ -457,8 +457,7 @@ func (n *network) Connected(nodeID ids.NodeID) { n.peersLock.Lock() peer, ok := n.connectingPeers.GetByID(nodeID) if !ok { - n.peerConfig.Log.Error( - "unexpectedly connected to peer when not marked as attempting to connect", + n.peerConfig.Log.Error("unexpectedly connected to peer when not marked as attempting to connect", zap.Stringer("nodeID", nodeID), ) n.peersLock.Unlock() @@ -609,7 +608,9 @@ func (n *network) Dispatch() error { conn, err := n.listener.Accept() // Returns error when n.Close() is called if err != nil { - n.peerConfig.Log.Debug("error during server accept", zap.Error(err)) + n.peerConfig.Log.Debug("error during server accept", + zap.Error(err), + ) // Sleep for a small amount of time to try to wait for the // error to go away. time.Sleep(time.Millisecond) @@ -947,8 +948,7 @@ func (n *network) dial(nodeID ids.NodeID, ip *trackedIP) { // "connection reset by peer" errors from interfering with the // later duplicated connection check. 
if connecting || connected { - n.peerConfig.Log.Verbo( - "exiting attempt to dial peer", + n.peerConfig.Log.Verbo("exiting attempt to dial peer", zap.String("reason", "already connected"), zap.Stringer("nodeID", nodeID), ) @@ -983,8 +983,7 @@ func (n *network) dial(nodeID ids.NodeID, ip *trackedIP) { conn, err := n.dialer.Dial(n.onCloseCtx, ip.ip) if err != nil { - n.peerConfig.Log.Verbo( - "failed to reach peer, attempting again", + n.peerConfig.Log.Verbo("failed to reach peer, attempting again", zap.Stringer("nodeID", nodeID), zap.Stringer("peerIP", ip.ip), zap.Duration("delay", ip.delay), @@ -1000,8 +999,7 @@ func (n *network) dial(nodeID ids.NodeID, ip *trackedIP) { err = n.upgrade(conn, n.clientUpgrader, false) if err != nil { - n.peerConfig.Log.Verbo( - "failed to upgrade, attempting again", + n.peerConfig.Log.Verbo("failed to upgrade, attempting again", zap.Stringer("nodeID", nodeID), zap.Stringer("peerIP", ip.ip), zap.Duration("delay", ip.delay), @@ -1053,14 +1051,13 @@ func (n *network) upgrade(conn net.Conn, upgrader peer.Upgrader, isIngress bool) if nodeID == n.config.MyNodeID { _ = tlsConn.Close() - n.peerConfig.Log.Verbo("dropping connection to myself") + n.peerConfig.Log.Trace("dropping connection to myself") return nil } if !n.AllowConnection(nodeID) { _ = tlsConn.Close() - n.peerConfig.Log.Verbo( - "dropping undesired connection", + n.peerConfig.Log.Verbo("dropping undesired connection", zap.Stringer("nodeID", nodeID), ) return nil @@ -1071,8 +1068,7 @@ func (n *network) upgrade(conn net.Conn, upgrader peer.Upgrader, isIngress bool) n.peersLock.Unlock() _ = tlsConn.Close() - n.peerConfig.Log.Verbo( - "dropping connection", + n.peerConfig.Log.Verbo("dropping connection", zap.String("reason", "shutting down the p2p network"), zap.Stringer("nodeID", nodeID), ) @@ -1083,8 +1079,7 @@ func (n *network) upgrade(conn net.Conn, upgrader peer.Upgrader, isIngress bool) n.peersLock.Unlock() _ = tlsConn.Close() - n.peerConfig.Log.Verbo( - "dropping connection", + n.peerConfig.Log.Verbo("dropping connection", zap.String("reason", "already connecting to peer"), zap.Stringer("nodeID", nodeID), ) @@ -1095,9 +1090,8 @@ func (n *network) upgrade(conn net.Conn, upgrader peer.Upgrader, isIngress bool) n.peersLock.Unlock() _ = tlsConn.Close() - n.peerConfig.Log.Verbo( - "dropping connection", - zap.String("reason", "already connecting to peer"), + n.peerConfig.Log.Verbo("dropping connection", + zap.String("reason", "already connected to peer"), zap.Stringer("nodeID", nodeID), ) return nil diff --git a/network/p2p/acp118/aggregator.go b/network/p2p/acp118/aggregator.go index d9d289ffec45..02f0e0a4734f 100644 --- a/network/p2p/acp118/aggregator.go +++ b/network/p2p/acp118/aggregator.go @@ -151,8 +151,7 @@ func (s *SignatureAggregator) AggregateSignatures( return msg, aggregatedStakeWeight, totalStakeWeight, nil case result := <-results: if result.Err != nil { - s.log.Debug( - "dropping response", + s.log.Debug("dropping response", zap.Stringer("nodeID", result.NodeID), zap.Error(err), ) @@ -161,8 +160,7 @@ func (s *SignatureAggregator) AggregateSignatures( // Validators may share public keys so drop any duplicate signatures if signerBitSet.Contains(result.Validator.Index) { - s.log.Debug( - "dropping duplicate signature", + s.log.Debug("dropping duplicate signature", zap.Stringer("nodeID", result.NodeID), zap.Error(err), ) diff --git a/network/p2p/gossip/gossip.go b/network/p2p/gossip/gossip.go index b44201ca90ed..e083f5fbcf6b 100644 --- a/network/p2p/gossip/gossip.go +++ 
b/network/p2p/gossip/gossip.go @@ -230,8 +230,7 @@ func (p *PullGossiper[_]) handleResponse( err error, ) { if err != nil { - p.log.Debug( - "failed gossip request", + p.log.Debug("failed gossip request", zap.Stringer("nodeID", nodeID), zap.Error(err), ) @@ -240,7 +239,9 @@ func (p *PullGossiper[_]) handleResponse( gossip, err := ParseAppResponse(responseBytes) if err != nil { - p.log.Debug("failed to unmarshal gossip response", zap.Error(err)) + p.log.Trace("failed to unmarshal gossip response", + zap.Error(err), + ) return } @@ -250,8 +251,7 @@ func (p *PullGossiper[_]) handleResponse( gossipable, err := p.marshaller.UnmarshalGossip(bytes) if err != nil { - p.log.Debug( - "failed to unmarshal gossip", + p.log.Trace("failed to unmarshal gossip", zap.Stringer("nodeID", nodeID), zap.Error(err), ) @@ -259,14 +259,12 @@ func (p *PullGossiper[_]) handleResponse( } gossipID := gossipable.GossipID() - p.log.Debug( - "received gossip", + p.log.Debug("received gossip", zap.Stringer("nodeID", nodeID), zap.Stringer("id", gossipID), ) if err := p.set.Add(gossipable); err != nil { - p.log.Debug( - "failed to add gossip to the known set", + p.log.Debug("failed to add gossip to the known set", zap.Stringer("nodeID", nodeID), zap.Stringer("id", gossipID), zap.Error(err), @@ -577,7 +575,9 @@ func Every(ctx context.Context, log logging.Logger, gossiper Gossiper, frequency select { case <-ticker.C: if err := gossiper.Gossip(ctx); err != nil { - log.Warn("failed to gossip", zap.Error(err)) + log.Warn("failed to gossip", + zap.Error(err), + ) } case <-ctx.Done(): log.Debug("shutting down gossip") diff --git a/network/p2p/gossip/handler.go b/network/p2p/gossip/handler.go index 1205a5f2b7df..d8e094ca332d 100644 --- a/network/p2p/gossip/handler.go +++ b/network/p2p/gossip/handler.go @@ -92,7 +92,9 @@ func (h Handler[T]) AppRequest(_ context.Context, _ ids.NodeID, _ time.Time, req func (h Handler[_]) AppGossip(_ context.Context, nodeID ids.NodeID, gossipBytes []byte) { gossip, err := ParseAppGossip(gossipBytes) if err != nil { - h.log.Debug("failed to unmarshal gossip", zap.Error(err)) + h.log.Trace("failed to unmarshal gossip", + zap.Error(err), + ) return } @@ -101,7 +103,7 @@ func (h Handler[_]) AppGossip(_ context.Context, nodeID ids.NodeID, gossipBytes receivedBytes += len(bytes) gossipable, err := h.marshaller.UnmarshalGossip(bytes) if err != nil { - h.log.Debug("failed to unmarshal gossip", + h.log.Trace("failed to unmarshal gossip", zap.Stringer("nodeID", nodeID), zap.Error(err), ) @@ -109,8 +111,7 @@ func (h Handler[_]) AppGossip(_ context.Context, nodeID ids.NodeID, gossipBytes } if err := h.set.Add(gossipable); err != nil { - h.log.Debug( - "failed to add gossip to the known set", + h.log.Debug("failed to add gossip to the known set", zap.Stringer("nodeID", nodeID), zap.Stringer("id", gossipable.GossipID()), zap.Error(err), diff --git a/network/p2p/router.go b/network/p2p/router.go index dba1bd124d19..9fba6e582c60 100644 --- a/network/p2p/router.go +++ b/network/p2p/router.go @@ -112,7 +112,7 @@ func (r *router) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID ui start := time.Now() parsedMsg, handler, handlerID, ok := r.parse(request) if !ok { - r.log.Debug("received message for unregistered handler", + r.log.Trace("received message for unregistered handler", zap.Stringer("messageOp", message.AppRequestOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), @@ -197,7 +197,7 @@ func (r *router) AppGossip(ctx context.Context, nodeID ids.NodeID, gossip []byte start := 
time.Now() parsedMsg, handler, handlerID, ok := r.parse(gossip) if !ok { - r.log.Debug("received message for unregistered handler", + r.log.Trace("received message for unregistered handler", zap.Stringer("messageOp", message.AppGossipOp), zap.Stringer("nodeID", nodeID), zap.Binary("message", gossip), diff --git a/network/p2p/validators.go b/network/p2p/validators.go index 699d74e5e5f8..2e3ffdba9de7 100644 --- a/network/p2p/validators.go +++ b/network/p2p/validators.go @@ -89,12 +89,16 @@ func (v *Validators) refresh(ctx context.Context) { height, err := v.validators.GetCurrentHeight(ctx) if err != nil { - v.log.Warn("failed to get current height", zap.Error(err)) + v.log.Warn("failed to get current height", + zap.Error(err), + ) return } validatorSet, err := v.validators.GetValidatorSet(ctx, height, v.subnetID) if err != nil { - v.log.Warn("failed to get validator set", zap.Error(err)) + v.log.Warn("failed to get validator set", + zap.Error(err), + ) return } diff --git a/network/peer/message_queue.go b/network/peer/message_queue.go index f2ccef6dc915..67e41f9ac1bf 100644 --- a/network/peer/message_queue.go +++ b/network/peer/message_queue.go @@ -90,8 +90,7 @@ func NewThrottledMessageQueue( func (q *throttledMessageQueue) Push(ctx context.Context, msg message.OutboundMessage) bool { if err := ctx.Err(); err != nil { - q.log.Debug( - "dropping outgoing message", + q.log.Debug("dropping outgoing message", zap.Stringer("messageOp", msg.Op()), zap.Stringer("nodeID", q.id), zap.Error(err), @@ -102,8 +101,7 @@ func (q *throttledMessageQueue) Push(ctx context.Context, msg message.OutboundMe // Acquire space on the outbound message queue, or drop [msg] if we can't. if !q.outboundMsgThrottler.Acquire(msg, q.id) { - q.log.Debug( - "dropping outgoing message", + q.log.Debug("dropping outgoing message", zap.String("reason", "rate-limiting"), zap.Stringer("messageOp", msg.Op()), zap.Stringer("nodeID", q.id), @@ -120,8 +118,7 @@ func (q *throttledMessageQueue) Push(ctx context.Context, msg message.OutboundMe defer q.cond.L.Unlock() if q.closed { - q.log.Debug( - "dropping outgoing message", + q.log.Debug("dropping outgoing message", zap.String("reason", "closed queue"), zap.Stringer("messageOp", msg.Op()), zap.Stringer("nodeID", q.id), @@ -227,16 +224,14 @@ func (q *blockingMessageQueue) Push(ctx context.Context, msg message.OutboundMes ctxDone := ctx.Done() select { case <-q.closing: - q.log.Debug( - "dropping message", + q.log.Debug("dropping message", zap.String("reason", "closed queue"), zap.Stringer("messageOp", msg.Op()), ) q.onFailed.SendFailed(msg) return false case <-ctxDone: - q.log.Debug( - "dropping message", + q.log.Debug("dropping message", zap.String("reason", "cancelled context"), zap.Stringer("messageOp", msg.Op()), ) @@ -249,16 +244,14 @@ func (q *blockingMessageQueue) Push(ctx context.Context, msg message.OutboundMes case q.queue <- msg: return true case <-ctxDone: - q.log.Debug( - "dropping message", + q.log.Debug("dropping message", zap.String("reason", "cancelled context"), zap.Stringer("messageOp", msg.Op()), ) q.onFailed.SendFailed(msg) return false case <-q.closing: - q.log.Debug( - "dropping message", + q.log.Debug("dropping message", zap.String("reason", "closed queue"), zap.Stringer("messageOp", msg.Op()), ) diff --git a/network/peer/network.go b/network/peer/network.go index 19161321f1c3..249ec6001dc0 100644 --- a/network/peer/network.go +++ b/network/peer/network.go @@ -21,7 +21,8 @@ type Network interface { AllowConnection(peerID ids.NodeID) bool // Track allows the peer 
to notify the network of potential new peers to - // connect to. + // connect to. This function should only return an error if the signature on + // one of the provided IPs is invalid. Track(ips []*ips.ClaimedIPPort) error // Disconnected is called when the peer finishes shutting down. It is not diff --git a/network/peer/peer.go b/network/peer/peer.go index d65a85171f90..882fe33c81df 100644 --- a/network/peer/peer.go +++ b/network/peer/peer.go @@ -405,7 +405,7 @@ func (p *peer) readMessages() { // Parse the message length msgLen, err := readMsgLen(msgLenBytes, constants.DefaultMaxMessageSize) if err != nil { - p.Log.Verbo("error parsing message length", + p.Log.Trace("error parsing message length", zap.Stringer("nodeID", p.id), zap.Error(err), ) @@ -475,7 +475,7 @@ func (p *peer) readMessages() { // Parse the message msg, err := p.MessageCreator.Parse(msgBytes, p.id, onFinishedHandling) if err != nil { - p.Log.Verbo("failed to parse message", + p.Log.Trace("failed to parse message", zap.Stringer("nodeID", p.id), zap.Binary("messageBytes", msgBytes), zap.Error(err), @@ -605,7 +605,7 @@ func (p *peer) writeMessage(writer io.Writer, msg message.OutboundMessage) { msgLen := uint32(len(msgBytes)) msgLenBytes, err := writeMsgLen(msgLen, constants.DefaultMaxMessageSize) if err != nil { - p.Log.Verbo("error writing message length", + p.Log.Error("error writing message length", zap.Stringer("nodeID", p.id), zap.Error(err), ) @@ -775,7 +775,7 @@ func (p *peer) handle(msg message.InboundMessage) { func (p *peer) handlePing(msg *p2p.Ping) { if msg.Uptime > 100 { - p.Log.Debug(malformedMessageLog, + p.Log.Trace(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.PingOp), zap.Stringer("subnetID", constants.PrimaryNetworkID), @@ -821,7 +821,7 @@ func (*peer) handlePong(*p2p.Pong) {} func (p *peer) handleHandshake(msg *p2p.Handshake) { if p.gotHandshake.Get() { - p.Log.Debug(malformedMessageLog, + p.Log.Trace(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.HandshakeOp), zap.String("reason", "already received handshake"), @@ -899,7 +899,7 @@ func (p *peer) handleHandshake(msg *p2p.Handshake) { for _, subnetIDBytes := range msg.TrackedSubnets { subnetID, err := ids.ToID(subnetIDBytes) if err != nil { - p.Log.Debug(malformedMessageLog, + p.Log.Trace(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.HandshakeOp), zap.String("field", "trackedSubnets"), @@ -923,7 +923,7 @@ func (p *peer) handleHandshake(msg *p2p.Handshake) { } if p.supportedACPs.Overlaps(p.objectedACPs) { - p.Log.Debug(malformedMessageLog, + p.Log.Trace(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.HandshakeOp), zap.String("field", "acps"), @@ -942,7 +942,7 @@ func (p *peer) handleHandshake(msg *p2p.Handshake) { var err error knownPeers, err = bloom.Parse(msg.KnownPeers.Filter) if err != nil { - p.Log.Debug(malformedMessageLog, + p.Log.Trace(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.HandshakeOp), zap.String("field", "knownPeers.filter"), @@ -954,7 +954,7 @@ func (p *peer) handleHandshake(msg *p2p.Handshake) { salt = msg.KnownPeers.Salt if saltLen := len(salt); saltLen > maxBloomSaltLen { - p.Log.Debug(malformedMessageLog, + p.Log.Trace(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.HandshakeOp), zap.String("field", "knownPeers.salt"), @@ -967,7 +967,7 @@ func (p *peer) handleHandshake(msg *p2p.Handshake) { addr, ok := 
ips.AddrFromSlice(msg.IpAddr) if !ok { - p.Log.Debug(malformedMessageLog, + p.Log.Trace(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.HandshakeOp), zap.String("field", "ip"), @@ -979,7 +979,7 @@ func (p *peer) handleHandshake(msg *p2p.Handshake) { port := uint16(msg.IpPort) if msg.IpPort == 0 { - p.Log.Debug(malformedMessageLog, + p.Log.Trace(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.HandshakeOp), zap.String("field", "port"), @@ -1020,7 +1020,7 @@ func (p *peer) handleHandshake(msg *p2p.Handshake) { signature, err := bls.SignatureFromBytes(msg.IpBlsSig) if err != nil { - p.Log.Debug(malformedMessageLog, + p.Log.Trace(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.HandshakeOp), zap.String("field", "blsSignature"), @@ -1072,7 +1072,7 @@ func (p *peer) handleHandshake(msg *p2p.Handshake) { func (p *peer) handleGetPeerList(msg *p2p.GetPeerList) { if !p.finishedHandshake.Get() { - p.Log.Debug(malformedMessageLog, + p.Log.Trace(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.GetPeerListOp), zap.String("reason", "not finished handshake"), @@ -1083,7 +1083,7 @@ func (p *peer) handleGetPeerList(msg *p2p.GetPeerList) { knownPeersMsg := msg.GetKnownPeers() filter, err := bloom.Parse(knownPeersMsg.GetFilter()) if err != nil { - p.Log.Debug(malformedMessageLog, + p.Log.Trace(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.GetPeerListOp), zap.String("field", "knownPeers.filter"), @@ -1095,7 +1095,7 @@ func (p *peer) handleGetPeerList(msg *p2p.GetPeerList) { salt := knownPeersMsg.GetSalt() if saltLen := len(salt); saltLen > maxBloomSaltLen { - p.Log.Debug(malformedMessageLog, + p.Log.Trace(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.GetPeerListOp), zap.String("field", "knownPeers.salt"), @@ -1143,7 +1143,7 @@ func (p *peer) handlePeerList(msg *p2p.PeerList) { for i, claimedIPPort := range msg.ClaimedIpPorts { tlsCert, err := staking.ParseCertificate(claimedIPPort.X509Certificate) if err != nil { - p.Log.Debug(malformedMessageLog, + p.Log.Trace(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.PeerListOp), zap.String("field", "cert"), @@ -1155,7 +1155,7 @@ func (p *peer) handlePeerList(msg *p2p.PeerList) { addr, ok := ips.AddrFromSlice(claimedIPPort.IpAddr) if !ok { - p.Log.Debug(malformedMessageLog, + p.Log.Trace(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.PeerListOp), zap.String("field", "ip"), @@ -1167,7 +1167,7 @@ func (p *peer) handlePeerList(msg *p2p.PeerList) { port := uint16(claimedIPPort.IpPort) if port == 0 { - p.Log.Debug(malformedMessageLog, + p.Log.Trace(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.PeerListOp), zap.String("field", "port"), @@ -1189,7 +1189,7 @@ func (p *peer) handlePeerList(msg *p2p.PeerList) { } if err := p.Network.Track(discoveredIPs); err != nil { - p.Log.Debug(malformedMessageLog, + p.Log.Trace(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.PeerListOp), zap.String("field", "claimedIP"), diff --git a/network/throttling/bandwidth_throttler.go b/network/throttling/bandwidth_throttler.go index 58938f31c11a..47dbf27bc89f 100644 --- a/network/throttling/bandwidth_throttler.go +++ b/network/throttling/bandwidth_throttler.go @@ -116,7 +116,7 @@ func (t *bandwidthThrottlerImpl) Acquire( 
t.lock.RUnlock() if !ok { // This should never happen. If it is, the caller is misusing this struct. - t.log.Debug("tried to acquire throttler but the node isn't registered", + t.log.Error("tried to acquire throttler but the node isn't registered", zap.Uint64("messageSize", msgSize), zap.Stringer("nodeID", nodeID), ) @@ -138,7 +138,7 @@ func (t *bandwidthThrottlerImpl) AddNode(nodeID ids.NodeID) { defer t.lock.Unlock() if _, ok := t.limiters[nodeID]; ok { - t.log.Debug("tried to add peer but it's already registered", + t.log.Error("tried to add peer but it's already registered", zap.Stringer("nodeID", nodeID), ) return @@ -152,7 +152,7 @@ func (t *bandwidthThrottlerImpl) RemoveNode(nodeID ids.NodeID) { defer t.lock.Unlock() if _, ok := t.limiters[nodeID]; !ok { - t.log.Debug("tried to remove peer but it isn't registered", + t.log.Error("tried to remove peer but it isn't registered", zap.Stringer("nodeID", nodeID), ) return diff --git a/network/throttling/inbound_msg_byte_throttler.go b/network/throttling/inbound_msg_byte_throttler.go index 237041f00a09..e99de61bedf0 100644 --- a/network/throttling/inbound_msg_byte_throttler.go +++ b/network/throttling/inbound_msg_byte_throttler.go @@ -280,7 +280,7 @@ func (t *inboundMsgByteThrottler) release(metadata *msgMetadata, nodeID ids.Node } } else { // This should never happen - t.log.Warn("couldn't find message", + t.log.Error("couldn't find message", zap.Stringer("nodeID", nodeID), zap.Uint64("messageID", msgID), ) diff --git a/node/node.go b/node/node.go index 825740037351..5555f8403574 100644 --- a/node/node.go +++ b/node/node.go @@ -629,7 +629,7 @@ func (n *Node) initNetworking(reg prometheus.Registerer) error { n.Net, err = network.NewNetwork( &n.Config.NetworkConfig, - n.Config.UpgradeConfig.EtnaTime, + n.Config.UpgradeConfig.FortunaTime, n.msgCreator, reg, n.Log, @@ -644,7 +644,9 @@ func (n *Node) initNetworking(reg prometheus.Registerer) error { // Write process context to the configured path. Supports the use of // dynamically chosen network ports with local network orchestration. func (n *Node) writeProcessContext() error { - n.Log.Info("writing process context", zap.String("path", n.Config.ProcessContextFilePath)) + n.Log.Info("writing process context", + zap.String("path", n.Config.ProcessContextFilePath), + ) // Write the process context to disk processContext := &node.ProcessContext{ @@ -1677,7 +1679,7 @@ func (n *Node) shutdown() { err := n.health.RegisterHealthCheck("shuttingDown", shuttingDownCheck, health.ApplicationTag) if err != nil { - n.Log.Debug("couldn't register shuttingDown health check", + n.Log.Warn("couldn't register shuttingDown health check", zap.Error(err), ) } @@ -1717,8 +1719,7 @@ func (n *Node) shutdown() { if n.DB != nil { if err := n.DB.Delete(ungracefulShutdown); err != nil { - n.Log.Error( - "failed to delete ungraceful shutdown key", + n.Log.Error("failed to delete ungraceful shutdown key", zap.Error(err), ) } diff --git a/snow/consensus/snowman/poll/set.go b/snow/consensus/snowman/poll/set.go index aa7e7342542a..57dc6dcb445a 100644 --- a/snow/consensus/snowman/poll/set.go +++ b/snow/consensus/snowman/poll/set.go @@ -88,18 +88,9 @@ func NewSet( // should be made. 
func (s *set) Add(requestID uint32, vdrs bag.Bag[ids.NodeID]) bool { if _, exists := s.polls.Get(requestID); exists { - s.log.Debug("dropping poll", - zap.String("reason", "duplicated request"), - zap.Uint32("requestID", requestID), - ) return false } - s.log.Verbo("creating poll", - zap.Uint32("requestID", requestID), - zap.Stringer("validators", &vdrs), - ) - s.polls.Put(requestID, poll{ Poll: s.factory.New(vdrs), // create the new poll start: time.Now(), @@ -110,12 +101,12 @@ func (s *set) Add(requestID uint32, vdrs bag.Bag[ids.NodeID]) bool { // Vote registers the connections response to a query for [id]. If there was no // query, or the response has already be registered, nothing is performed. -func (s *set) Vote(requestID uint32, vdr ids.NodeID, vote ids.ID) []bag.Bag[ids.ID] { +func (s *set) Vote(requestID uint32, nodeID ids.NodeID, vote ids.ID) []bag.Bag[ids.ID] { holder, exists := s.polls.Get(requestID) if !exists { s.log.Verbo("dropping vote", zap.String("reason", "unknown poll"), - zap.Stringer("validator", vdr), + zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) return nil @@ -124,12 +115,12 @@ func (s *set) Vote(requestID uint32, vdr ids.NodeID, vote ids.ID) []bag.Bag[ids. p := holder.GetPoll() s.log.Verbo("processing vote", - zap.Stringer("validator", vdr), + zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), zap.Stringer("vote", vote), ) - p.Vote(vdr, vote) + p.Vote(nodeID, vote) if !p.Finished() { return nil } @@ -152,7 +143,7 @@ func (s *set) processFinishedPolls() []bag.Bag[ids.ID] { break } - s.log.Verbo("poll finished", + s.log.Debug("poll finished", zap.Uint32("requestID", iter.Key()), zap.Stringer("poll", holder.GetPoll()), ) @@ -170,25 +161,25 @@ func (s *set) processFinishedPolls() []bag.Bag[ids.ID] { // Drop registers the connections response to a query for [id]. If there was no // query, or the response has already be registered, nothing is performed. -func (s *set) Drop(requestID uint32, vdr ids.NodeID) []bag.Bag[ids.ID] { +func (s *set) Drop(requestID uint32, nodeID ids.NodeID) []bag.Bag[ids.ID] { holder, exists := s.polls.Get(requestID) if !exists { s.log.Verbo("dropping vote", zap.String("reason", "unknown poll"), - zap.Stringer("validator", vdr), + zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) return nil } s.log.Verbo("processing dropped vote", - zap.Stringer("validator", vdr), + zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) poll := holder.GetPoll() - poll.Drop(vdr) + poll.Drop(nodeID) if !poll.Finished() { return nil } diff --git a/snow/consensus/snowman/topological.go b/snow/consensus/snowman/topological.go index e6aabdbf3dbe..2873de1d6a46 100644 --- a/snow/consensus/snowman/topological.go +++ b/snow/consensus/snowman/topological.go @@ -142,27 +142,22 @@ func (ts *Topological) NumProcessing() int { } func (ts *Topological) Add(blk Block) error { - blkID := blk.ID() - height := blk.Height() - ts.ctx.Log.Verbo("adding block", - zap.Stringer("blkID", blkID), - zap.Uint64("height", height), - ) - // Make sure a block is not inserted twice. 
+ blkID := blk.ID() if ts.Processing(blkID) { return errDuplicateAdd } - ts.metrics.Verified(height) - ts.metrics.Issued(blkID, ts.pollNumber) - parentID := blk.Parent() parentNode, ok := ts.blocks[parentID] if !ok { return errUnknownParentBlock } + height := blk.Height() + ts.metrics.Verified(height) + ts.metrics.Issued(blkID, ts.pollNumber) + // add the block as a child of its parent, and add the block to the tree parentNode.AddChild(blk) ts.blocks[blkID] = &snowmanBlock{ @@ -177,7 +172,7 @@ func (ts *Topological) Add(blk Block) error { ts.preferredHeights[height] = blkID } - ts.ctx.Log.Verbo("added block", + ts.ctx.Log.Debug("added block", zap.Stringer("blkID", blkID), zap.Uint64("height", height), zap.Stringer("parentID", parentID), @@ -552,9 +547,6 @@ func (ts *Topological) vote(ctx context.Context, voteStack []votes) (ids.ID, err if notRejected { ts.ctx.Log.Verbo("deferring confidence reset of child block", zap.Stringer("childID", childID), - ) - - ts.ctx.Log.Verbo("voting for next block", zap.Stringer("nextID", nextID), ) diff --git a/snow/engine/avalanche/bootstrap/bootstrapper.go b/snow/engine/avalanche/bootstrap/bootstrapper.go index 4acd38cee291..1beceb24666a 100644 --- a/snow/engine/avalanche/bootstrap/bootstrapper.go +++ b/snow/engine/avalanche/bootstrap/bootstrapper.go @@ -136,7 +136,7 @@ func (b *Bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, request } requestedVtxID, ok := b.outstandingRequests.DeleteKey(request) if !ok { // this message isn't in response to a request we made - b.Ctx.Log.Debug("received unexpected Ancestors", + b.Ctx.Log.Trace("received unexpected Ancestors", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) @@ -147,7 +147,7 @@ func (b *Bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, request lenVtxs := len(vtxs) if lenVtxs == 0 { - b.Ctx.Log.Debug("Ancestors contains no vertices", + b.Ctx.Log.Trace("Ancestors contains no vertices", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) @@ -159,7 +159,7 @@ func (b *Bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, request if lenVtxs > b.Config.AncestorsMaxContainersReceived { vtxs = vtxs[:b.Config.AncestorsMaxContainersReceived] - b.Ctx.Log.Debug("ignoring containers in Ancestors", + b.Ctx.Log.Trace("ignoring containers in Ancestors", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), zap.Int("numIgnored", lenVtxs-b.Config.AncestorsMaxContainersReceived), @@ -168,7 +168,7 @@ func (b *Bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, request vtx, err := b.Manager.ParseVtx(ctx, vtxs[0]) if err != nil { - b.Ctx.Log.Debug("failed to parse requested vertex", + b.Ctx.Log.Trace("failed to parse requested vertex", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), zap.Stringer("vtxID", requestedVtxID), @@ -180,7 +180,7 @@ func (b *Bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, request } if actualID := vtx.ID(); actualID != requestedVtxID { - b.Ctx.Log.Debug("received incorrect vertex", + b.Ctx.Log.Trace("received incorrect vertex", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), zap.Stringer("vtxID", actualID), @@ -212,7 +212,7 @@ func (b *Bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, request for _, vtxBytes := range vtxs[1:] { vtx, err := b.Manager.ParseVtx(ctx, vtxBytes) // Persists the vtx if err != nil { - b.Ctx.Log.Debug("failed to parse vertex", + b.Ctx.Log.Trace("failed to parse vertex", zap.Stringer("nodeID", nodeID), 
zap.Uint32("requestID", requestID), zap.Error(err), @@ -221,7 +221,7 @@ func (b *Bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, request } vtxID := vtx.ID() if !eligibleVertices.Contains(vtxID) { - b.Ctx.Log.Debug("received vertex that should not have been included", + b.Ctx.Log.Trace("received vertex that should not have been included", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), zap.Stringer("vtxID", vtxID), @@ -260,7 +260,7 @@ func (b *Bootstrapper) GetAncestorsFailed(ctx context.Context, nodeID ids.NodeID } vtxID, ok := b.outstandingRequests.DeleteKey(request) if !ok { - b.Ctx.Log.Debug("unexpectedly called GetAncestorsFailed", + b.Ctx.Log.Trace("unexpectedly called GetAncestorsFailed", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) diff --git a/snow/engine/avalanche/bootstrap/tx_job.go b/snow/engine/avalanche/bootstrap/tx_job.go index 62a664ddc157..cf7ec51ee7db 100644 --- a/snow/engine/avalanche/bootstrap/tx_job.go +++ b/snow/engine/avalanche/bootstrap/tx_job.go @@ -84,7 +84,7 @@ func (t *txJob) Execute(ctx context.Context) error { } t.numAccepted.Inc() - t.log.Trace("accepting transaction in bootstrapping", + t.log.Debug("accepting transaction in bootstrapping", zap.Stringer("txID", txID), ) if err := t.tx.Accept(ctx); err != nil { diff --git a/snow/engine/avalanche/bootstrap/vertex_job.go b/snow/engine/avalanche/bootstrap/vertex_job.go index a9326c08fc78..130a952bc12b 100644 --- a/snow/engine/avalanche/bootstrap/vertex_job.go +++ b/snow/engine/avalanche/bootstrap/vertex_job.go @@ -104,7 +104,7 @@ func (v *vertexJob) Execute(ctx context.Context) error { return fmt.Errorf("attempting to execute vertex with status %s", status) case choices.Processing: v.numAccepted.Inc() - v.log.Trace("accepting vertex in bootstrapping", + v.log.Debug("accepting vertex in bootstrapping", zap.Stringer("vtxID", v.vtx.ID()), ) if err := v.vtx.Accept(ctx); err != nil { diff --git a/snow/engine/avalanche/getter/getter.go b/snow/engine/avalanche/getter/getter.go index f91324a1c85d..2ddd52e6ce7a 100644 --- a/snow/engine/avalanche/getter/getter.go +++ b/snow/engine/avalanche/getter/getter.go @@ -111,8 +111,10 @@ func (gh *getter) GetAncestors(ctx context.Context, nodeID ids.NodeID, requestID vertex, err := gh.storage.GetVtx(ctx, vtxID) if err != nil || vertex.Status() == choices.Unknown { // Don't have the requested vertex. Drop message. 
- gh.log.Verbo("dropping getAncestors") - return nil //nolint:nilerr + gh.log.Verbo("dropping getAncestors", + zap.Error(err), + ) + return nil } queue := make([]avalanche.Vertex, 1, gh.maxContainersGetAncestors) // for BFS diff --git a/snow/engine/snowman/bootstrap/bootstrapper.go b/snow/engine/snowman/bootstrap/bootstrapper.go index bf178cd1f8da..ba9e365d5103 100644 --- a/snow/engine/snowman/bootstrap/bootstrapper.go +++ b/snow/engine/snowman/bootstrap/bootstrapper.go @@ -145,7 +145,9 @@ func New(config Config, onFinished func(ctx context.Context, lastReqID uint32) e defer config.Ctx.Lock.Unlock() if err := bs.Timeout(); err != nil { - bs.Config.Ctx.Log.Warn("Encountered error during bootstrapping: %w", zap.Error(err)) + bs.Config.Ctx.Log.Warn("Encountered error during bootstrapping", + zap.Error(err), + ) } } bs.TimeoutRegistrar = common.NewTimeoutScheduler(timeout, config.BootstrapTracker.AllBootstrapped()) @@ -456,7 +458,7 @@ func (b *Bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, request } wantedBlkID, ok := b.outstandingRequests.DeleteKey(request) if !ok { // this message isn't in response to a request we made - b.Ctx.Log.Debug("received unexpected Ancestors", + b.Ctx.Log.Trace("received unexpected Ancestors", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) @@ -480,7 +482,7 @@ func (b *Bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, request if lenBlks > b.Config.AncestorsMaxContainersReceived { blks = blks[:b.Config.AncestorsMaxContainersReceived] - b.Ctx.Log.Debug("ignoring containers in Ancestors", + b.Ctx.Log.Trace("ignoring containers in Ancestors", zap.Int("numContainers", lenBlks-b.Config.AncestorsMaxContainersReceived), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), @@ -489,7 +491,7 @@ func (b *Bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, request blocks, err := block.BatchedParseBlock(ctx, b.VM, blks) if err != nil { // the provided blocks couldn't be parsed - b.Ctx.Log.Debug("failed to parse blocks in Ancestors", + b.Ctx.Log.Trace("failed to parse blocks in Ancestors", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), zap.Error(err), @@ -499,7 +501,7 @@ func (b *Bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, request } if len(blocks) == 0 { - b.Ctx.Log.Debug("parsing blocks returned an empty set of blocks", + b.Ctx.Log.Trace("parsing blocks returned an empty set of blocks", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) @@ -509,7 +511,7 @@ func (b *Bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, request requestedBlock := blocks[0] if actualID := requestedBlock.ID(); actualID != wantedBlkID { - b.Ctx.Log.Debug("first block is not the requested block", + b.Ctx.Log.Trace("first block is not the requested block", zap.Stringer("expectedBlkID", wantedBlkID), zap.Stringer("blkID", actualID), ) @@ -548,7 +550,7 @@ func (b *Bootstrapper) GetAncestorsFailed(ctx context.Context, nodeID ids.NodeID } blkID, ok := b.outstandingRequests.DeleteKey(request) if !ok { - b.Ctx.Log.Debug("unexpectedly called GetAncestorsFailed", + b.Ctx.Log.Trace("unexpectedly called GetAncestorsFailed", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) diff --git a/snow/engine/snowman/engine.go b/snow/engine/snowman/engine.go index 7771f3c50bf7..a69d21b820f7 100644 --- a/snow/engine/snowman/engine.go +++ b/snow/engine/snowman/engine.go @@ -216,7 +216,7 @@ func (e *Engine) Put(ctx context.Context, nodeID ids.NodeID, requestID 
uint32, b zap.Error(err), ) } else { - e.Ctx.Log.Debug("failed to parse block", + e.Ctx.Log.Trace("failed to parse block", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), zap.Error(err), @@ -239,7 +239,7 @@ func (e *Engine) Put(ctx context.Context, nodeID ids.NodeID, requestID uint32, b case ok: actualBlkID := blk.ID() if actualBlkID != expectedBlkID { - e.Ctx.Log.Debug("incorrect block returned in Put", + e.Ctx.Log.Trace("incorrect block returned in Put", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), zap.Stringer("blkID", actualBlkID), @@ -286,7 +286,7 @@ func (e *Engine) GetFailed(ctx context.Context, nodeID ids.NodeID, requestID uin } blkID, ok := e.blkReqs.DeleteKey(req) if !ok { - e.Ctx.Log.Debug("unexpected GetFailed", + e.Ctx.Log.Trace("unexpected GetFailed", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) @@ -330,7 +330,7 @@ func (e *Engine) PushQuery(ctx context.Context, nodeID ids.NodeID, requestID uin zap.Error(err), ) } else { - e.Ctx.Log.Debug("failed to parse block", + e.Ctx.Log.Trace("failed to parse block", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), zap.Error(err), @@ -823,7 +823,7 @@ func (e *Engine) issue( // either the last accepted block or is not decided. var deps []ids.ID if parentID := blk.Parent(); !e.canIssueChildOn(parentID) { - e.Ctx.Log.Verbo("block waiting for parent to be issued", + e.Ctx.Log.Debug("block waiting for parent to be issued", zap.Stringer("blkID", blkID), zap.Stringer("parentID", parentID), ) @@ -853,7 +853,7 @@ func (e *Engine) sendRequest( e.blkReqs.Put(req, blkID) e.blkReqSourceMetric[req] = issuedMetric - e.Ctx.Log.Verbo("sending Get request", + e.Ctx.Log.Debug("sending Get request", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", e.requestID), zap.Stringer("blkID", blkID), @@ -874,11 +874,7 @@ func (e *Engine) sendQuery( return } - e.Ctx.Log.Verbo("sampling from validators", - zap.Stringer("validators", e.Validators), - ) - - vdrIDs, err := e.Validators.Sample(e.Ctx.SubnetID, e.Params.K) + nodeIDs, err := e.Validators.Sample(e.Ctx.SubnetID, e.Params.K) if err != nil { e.Ctx.Log.Warn("dropped query for block", zap.String("reason", "insufficient number of validators"), @@ -900,9 +896,9 @@ func (e *Engine) sendQuery( return } - vdrBag := bag.Of(vdrIDs...) + nodeIDBag := bag.Of(nodeIDs...) e.requestID++ - if !e.polls.Add(e.requestID, vdrBag) { + if !e.polls.Add(e.requestID, nodeIDBag) { e.Ctx.Log.Error("dropped query for block", zap.String("reason", "failed to add poll"), zap.Stringer("blkID", blkID), @@ -911,11 +907,18 @@ func (e *Engine) sendQuery( return } - vdrSet := set.Of(vdrIDs...) + e.Ctx.Log.Debug("sending query", + zap.Stringer("blkID", blkID), + zap.Uint32("requestID", e.requestID), + zap.Stringers("nodeIDs", nodeIDs), + zap.Bool("push", push), + ) + + nodeIDSet := set.Of(nodeIDs...) 
if push { - e.Sender.SendPushQuery(ctx, vdrSet, e.requestID, blkBytes, nextHeightToAccept) + e.Sender.SendPushQuery(ctx, nodeIDSet, e.requestID, blkBytes, nextHeightToAccept) } else { - e.Sender.SendPullQuery(ctx, vdrSet, e.requestID, blkID, nextHeightToAccept) + e.Sender.SendPullQuery(ctx, nodeIDSet, e.requestID, blkID, nextHeightToAccept) } } @@ -1093,7 +1096,7 @@ func (e *Engine) addUnverifiedBlockToConsensus( e.unverifiedIDToAncestor.Remove(blkID) e.unverifiedBlockCache.Evict(blkID) e.metrics.issuerStake.Observe(float64(e.Validators.GetWeight(e.Ctx.SubnetID, nodeID))) - e.Ctx.Log.Verbo("adding block to consensus", + e.Ctx.Log.Debug("adding block to consensus", zap.Stringer("nodeID", nodeID), zap.Stringer("blkID", blkID), zap.Uint64("height", blkHeight), @@ -1131,7 +1134,7 @@ func (e *Engine) getProcessingAncestor(initialVote ids.ID) (ids.ID, bool) { // If we haven't cached the block, drop [vote]. blk, ok := e.unverifiedBlockCache.Get(bubbledVote) if !ok { - e.Ctx.Log.Debug("dropping vote", + e.Ctx.Log.Verbo("dropping vote", zap.String("reason", "ancestor isn't cached"), zap.Stringer("initialVoteID", initialVote), zap.Stringer("bubbledVoteID", bubbledVote), @@ -1141,7 +1144,7 @@ func (e *Engine) getProcessingAncestor(initialVote ids.ID) (ids.ID, bool) { } if e.isDecided(blk) { - e.Ctx.Log.Debug("dropping vote", + e.Ctx.Log.Verbo("dropping vote", zap.String("reason", "bubbled vote already decided"), zap.Stringer("initialVoteID", initialVote), zap.Stringer("bubbledVoteID", bubbledVote), diff --git a/snow/engine/snowman/getter/getter.go b/snow/engine/snowman/getter/getter.go index 7af3d3a2263e..3a3377b33f81 100644 --- a/snow/engine/snowman/getter/getter.go +++ b/snow/engine/snowman/getter/getter.go @@ -214,7 +214,7 @@ func (gh *getter) Get(ctx context.Context, nodeID ids.NodeID, requestID uint32, // If we failed to get the block, that means either an unexpected error // has occurred, [vdr] is not following the protocol, or the // block has been pruned. 
- gh.log.Debug("failed Get request", + gh.log.Trace("failed Get request", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), zap.Stringer("blkID", blkID), diff --git a/snow/engine/snowman/syncer/state_syncer.go b/snow/engine/snowman/syncer/state_syncer.go index 7e669fee2534..9cbc29cfa858 100644 --- a/snow/engine/snowman/syncer/state_syncer.go +++ b/snow/engine/snowman/syncer/state_syncer.go @@ -197,7 +197,7 @@ func (ss *stateSyncer) StateSummaryFrontier(ctx context.Context, nodeID ids.Node zap.Error(err), ) } else { - ss.Ctx.Log.Debug("failed to parse summary", + ss.Ctx.Log.Trace("failed to parse summary", zap.Error(err), ) } @@ -298,7 +298,7 @@ func (ss *stateSyncer) AcceptedStateSummary(ctx context.Context, nodeID ids.Node for summaryID := range summaryIDs { ws, ok := ss.weightedSummaries[summaryID] if !ok { - ss.Ctx.Log.Debug("skipping summary", + ss.Ctx.Log.Trace("skipping summary", zap.String("reason", "unknown summary"), zap.Stringer("nodeID", nodeID), zap.Stringer("summaryID", summaryID), diff --git a/snow/networking/handler/handler.go b/snow/networking/handler/handler.go index d25d6b8a1493..1615404d5607 100644 --- a/snow/networking/handler/handler.go +++ b/snow/networking/handler/handler.go @@ -540,7 +540,7 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { case *p2ppb.AcceptedStateSummary: summaryIDs, err := getIDs(msg.SummaryIds) if err != nil { - h.ctx.Log.Debug("message with invalid field", + h.ctx.Log.Trace("message with invalid field", zap.Stringer("nodeID", nodeID), zap.Stringer("messageOp", message.AcceptedStateSummaryOp), zap.Uint32("requestID", msg.RequestId), @@ -563,7 +563,7 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { case *p2ppb.AcceptedFrontier: containerID, err := ids.ToID(msg.ContainerId) if err != nil { - h.ctx.Log.Debug("message with invalid field", + h.ctx.Log.Trace("message with invalid field", zap.Stringer("nodeID", nodeID), zap.Stringer("messageOp", message.AcceptedFrontierOp), zap.Uint32("requestID", msg.RequestId), @@ -581,7 +581,7 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { case *p2ppb.GetAccepted: containerIDs, err := getIDs(msg.ContainerIds) if err != nil { - h.ctx.Log.Debug("message with invalid field", + h.ctx.Log.Trace("message with invalid field", zap.Stringer("nodeID", nodeID), zap.Stringer("messageOp", message.GetAcceptedOp), zap.Uint32("requestID", msg.RequestId), @@ -596,7 +596,7 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { case *p2ppb.Accepted: containerIDs, err := getIDs(msg.ContainerIds) if err != nil { - h.ctx.Log.Debug("message with invalid field", + h.ctx.Log.Trace("message with invalid field", zap.Stringer("nodeID", nodeID), zap.Stringer("messageOp", message.AcceptedOp), zap.Uint32("requestID", msg.RequestId), @@ -614,7 +614,7 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { case *p2ppb.GetAncestors: containerID, err := ids.ToID(msg.ContainerId) if err != nil { - h.ctx.Log.Debug("dropping message with invalid field", + h.ctx.Log.Trace("dropping message with invalid field", zap.Stringer("nodeID", nodeID), zap.Stringer("messageOp", message.GetAncestorsOp), zap.Uint32("requestID", msg.RequestId), @@ -635,7 +635,7 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { case *p2ppb.Get: containerID, err := ids.ToID(msg.ContainerId) if err != nil { - h.ctx.Log.Debug("dropping message with invalid field", + h.ctx.Log.Trace("dropping message with invalid field", 
zap.Stringer("nodeID", nodeID), zap.Stringer("messageOp", message.GetOp), zap.Uint32("requestID", msg.RequestId), @@ -659,7 +659,7 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { case *p2ppb.PullQuery: containerID, err := ids.ToID(msg.ContainerId) if err != nil { - h.ctx.Log.Debug("dropping message with invalid field", + h.ctx.Log.Trace("dropping message with invalid field", zap.Stringer("nodeID", nodeID), zap.Stringer("messageOp", message.PullQueryOp), zap.Uint32("requestID", msg.RequestId), @@ -674,7 +674,7 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { case *p2ppb.Chits: preferredID, err := ids.ToID(msg.PreferredId) if err != nil { - h.ctx.Log.Debug("message with invalid field", + h.ctx.Log.Trace("message with invalid field", zap.Stringer("nodeID", nodeID), zap.Stringer("messageOp", message.ChitsOp), zap.Uint32("requestID", msg.RequestId), @@ -686,7 +686,7 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { preferredIDAtHeight, err := ids.ToID(msg.PreferredIdAtHeight) if err != nil { - h.ctx.Log.Debug("message with invalid field", + h.ctx.Log.Trace("message with invalid field", zap.Stringer("nodeID", nodeID), zap.Stringer("messageOp", message.ChitsOp), zap.Uint32("requestID", msg.RequestId), @@ -698,7 +698,7 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { acceptedID, err := ids.ToID(msg.AcceptedId) if err != nil { - h.ctx.Log.Debug("message with invalid field", + h.ctx.Log.Trace("message with invalid field", zap.Stringer("nodeID", nodeID), zap.Stringer("messageOp", message.ChitsOp), zap.Uint32("requestID", msg.RequestId), diff --git a/snow/networking/router/chain_router.go b/snow/networking/router/chain_router.go index 3893d2aeeb94..2654c663cfd1 100644 --- a/snow/networking/router/chain_router.go +++ b/snow/networking/router/chain_router.go @@ -210,7 +210,7 @@ func (cr *ChainRouter) HandleInbound(ctx context.Context, msg message.InboundMes m := msg.Message() chainID, err := message.GetChainID(m) if err != nil { - cr.log.Debug("dropping message with invalid field", + cr.log.Trace("dropping message with invalid field", zap.Stringer("nodeID", nodeID), zap.Stringer("messageOp", op), zap.String("field", "ChainID"), @@ -223,7 +223,7 @@ func (cr *ChainRouter) HandleInbound(ctx context.Context, msg message.InboundMes requestID, ok := message.GetRequestID(m) if !ok { - cr.log.Debug("dropping message with invalid field", + cr.log.Trace("dropping message with invalid field", zap.Stringer("nodeID", nodeID), zap.Stringer("messageOp", op), zap.String("field", "RequestID"), @@ -400,7 +400,7 @@ func (cr *ChainRouter) AddChain(ctx context.Context, chain handler.Handler) { ) return } - cr.log.Debug("registering chain with chain router", + cr.log.Info("registering chain with chain router", zap.Stringer("chainID", chainID), ) chain.SetOnStopped(func() { diff --git a/upgrade/upgrade.go b/upgrade/upgrade.go index d49d964e8d25..016250ec3ed3 100644 --- a/upgrade/upgrade.go +++ b/upgrade/upgrade.go @@ -35,7 +35,7 @@ var ( CortinaXChainStopVertexID: ids.FromStringOrPanic("jrGWDh5Po9FMj54depyunNixpia5PN4aAYxfmNzU8n752Rjga"), DurangoTime: time.Date(2024, time.March, 6, 16, 0, 0, 0, time.UTC), EtnaTime: time.Date(2024, time.December, 16, 17, 0, 0, 0, time.UTC), - FortunaTime: UnscheduledActivationTime, + FortunaTime: time.Date(2025, time.April, 8, 15, 0, 0, 0, time.UTC), } Fuji = Config{ ApricotPhase1Time: time.Date(2021, time.March, 26, 14, 0, 0, 0, time.UTC), @@ -56,7 +56,7 @@ var ( 
CortinaXChainStopVertexID: ids.FromStringOrPanic("2D1cmbiG36BqQMRyHt4kFhWarmatA1ighSpND3FeFgz3vFVtCZ"), DurangoTime: time.Date(2024, time.February, 13, 16, 0, 0, 0, time.UTC), EtnaTime: time.Date(2024, time.November, 25, 16, 0, 0, 0, time.UTC), - FortunaTime: UnscheduledActivationTime, + FortunaTime: time.Date(2025, time.March, 13, 15, 0, 0, 0, time.UTC), } Default = Config{ ApricotPhase1Time: InitiallyActiveTime, @@ -73,7 +73,7 @@ var ( CortinaXChainStopVertexID: ids.Empty, DurangoTime: InitiallyActiveTime, EtnaTime: InitiallyActiveTime, - FortunaTime: UnscheduledActivationTime, + FortunaTime: InitiallyActiveTime, } ErrInvalidUpgradeTimes = errors.New("invalid upgrade configuration") diff --git a/utils/constants/acps.go b/utils/constants/acps.go index dbf9db38ee94..76bd25b9f234 100644 --- a/utils/constants/acps.go +++ b/utils/constants/acps.go @@ -37,5 +37,7 @@ var ( ) // ScheduledACPs are the ACPs included into the next upgrade. - ScheduledACPs = set.Of[uint32]() + ScheduledACPs = set.Of[uint32]( + 176, // https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/176-dynamic-evm-gas-limit-and-price-discovery-updates/README.md + ) ) diff --git a/utils/logging/log.go b/utils/logging/log.go index 8ed585524099..ee50c79bdd42 100644 --- a/utils/logging/log.go +++ b/utils/logging/log.go @@ -136,7 +136,10 @@ func (l *log) SetLevel(level Level) { func (l *log) StopOnPanic() { if r := recover(); r != nil { - l.Fatal("panicking", zap.Any("reason", r), zap.Stack("from")) + l.Fatal("panicking", + zap.Any("reason", r), + zap.Stack("from"), + ) l.Stop() panic(r) } @@ -149,7 +152,10 @@ func (l *log) RecoverAndPanic(f func()) { func (l *log) stopAndExit(exit func()) { if r := recover(); r != nil { - l.Fatal("panicking", zap.Any("reason", r), zap.Stack("from")) + l.Fatal("panicking", + zap.Any("reason", r), + zap.Stack("from"), + ) l.Stop() exit() } diff --git a/version/compatibility.json b/version/compatibility.json index 57f9713e7986..1d4abb548fb2 100644 --- a/version/compatibility.json +++ b/version/compatibility.json @@ -1,6 +1,7 @@ { "39": [ - "v1.12.2" + "v1.12.2", + "v1.13.0" ], "38": [ "v1.11.13", diff --git a/version/constants.go b/version/constants.go index 4598a5930dc3..ead209ab3907 100644 --- a/version/constants.go +++ b/version/constants.go @@ -22,8 +22,8 @@ const ( var ( Current = &Semantic{ Major: 1, - Minor: 12, - Patch: 2, + Minor: 13, + Patch: 0, } CurrentApp = &Application{ Name: Client, @@ -34,13 +34,13 @@ var ( MinimumCompatibleVersion = &Application{ Name: Client, Major: 1, - Minor: 12, + Minor: 13, Patch: 0, } PrevMinimumCompatibleVersion = &Application{ Name: Client, Major: 1, - Minor: 11, + Minor: 12, Patch: 0, } diff --git a/vms/avm/block/executor/block.go b/vms/avm/block/executor/block.go index 01e0c5d4212a..fa4ebea745e9 100644 --- a/vms/avm/block/executor/block.go +++ b/vms/avm/block/executor/block.go @@ -246,8 +246,7 @@ func (b *Block) Accept(context.Context) error { return err } - b.manager.backend.Ctx.Log.Trace( - "accepted block", + b.manager.backend.Ctx.Log.Debug("accepted block", zap.Stringer("blkID", blkID), zap.Uint64("height", b.Height()), zap.Stringer("parentID", b.Parent()), diff --git a/vms/platformvm/block/executor/acceptor.go b/vms/platformvm/block/executor/acceptor.go index 45409c69a7ab..3873fe81330a 100644 --- a/vms/platformvm/block/executor/acceptor.go +++ b/vms/platformvm/block/executor/acceptor.go @@ -104,8 +104,7 @@ func (a *acceptor) ApricotAtomicBlock(b *block.ApricotAtomicBlock) error { ) } - a.ctx.Log.Trace( - "accepted block", + 
a.ctx.Log.Debug("accepted block", zap.String("blockType", "apricot atomic"), zap.Stringer("blkID", blkID), zap.Uint64("height", b.Height()), @@ -174,8 +173,7 @@ func (a *acceptor) optionBlock(b block.Block, blockType string) error { onAcceptFunc() } - a.ctx.Log.Trace( - "accepted block", + a.ctx.Log.Debug("accepted block", zap.String("blockType", blockType), zap.Stringer("blkID", blkID), zap.Uint64("height", b.Height()), @@ -206,8 +204,7 @@ func (a *acceptor) proposalBlock(b block.Block, blockType string) { blkID := b.ID() a.backend.lastAccepted = blkID - a.ctx.Log.Trace( - "accepted block", + a.ctx.Log.Debug("accepted block", zap.String("blockType", blockType), zap.Stringer("blkID", blkID), zap.Uint64("height", b.Height()), @@ -253,8 +250,7 @@ func (a *acceptor) standardBlock(b block.Block, blockType string) error { onAcceptFunc() } - a.ctx.Log.Trace( - "accepted block", + a.ctx.Log.Debug("accepted block", zap.String("blockType", blockType), zap.Stringer("blkID", blkID), zap.Uint64("height", b.Height()), diff --git a/vms/platformvm/block/executor/options.go b/vms/platformvm/block/executor/options.go index 7487ea449335..927222163f74 100644 --- a/vms/platformvm/block/executor/options.go +++ b/vms/platformvm/block/executor/options.go @@ -75,7 +75,7 @@ func (o *options) BanffProposalBlock(b *block.BanffProposalBlock) error { prefersCommit, err := o.prefersCommit(b.Tx) if err != nil { - o.log.Debug("falling back to prefer commit", + o.log.Trace("falling back to prefer commit", zap.Error(err), ) // We fall back to commit here to err on the side of over-rewarding diff --git a/vms/platformvm/block/executor/rejector.go b/vms/platformvm/block/executor/rejector.go index b5dde1f6e84c..88c40e3c5d9c 100644 --- a/vms/platformvm/block/executor/rejector.go +++ b/vms/platformvm/block/executor/rejector.go @@ -73,8 +73,7 @@ func (r *rejector) rejectBlock(b block.Block, blockType string) error { for _, tx := range b.Txs() { if err := r.Mempool.Add(tx); err != nil { - r.ctx.Log.Debug( - "failed to reissue tx", + r.ctx.Log.Debug("failed to reissue tx", zap.Stringer("txID", tx.ID()), zap.Stringer("blkID", blkID), zap.Error(err), diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index 90540203e8d9..b3d7850aa65e 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -104,13 +104,15 @@ func (vm *VM) Initialize( _ []*common.Fx, appSender common.AppSender, ) error { - chainCtx.Log.Verbo("initializing platform chain") + chainCtx.Log.Info("initializing platform chain") execConfig, err := config.GetConfig(configBytes) if err != nil { return err } - chainCtx.Log.Info("using VM execution config", zap.Reflect("config", execConfig)) + chainCtx.Log.Info("using VM execution config", + zap.Reflect("config", execConfig), + ) registerer, err := metrics.MakeAndRegister(chainCtx.Metrics, "") if err != nil { @@ -257,7 +259,7 @@ func (vm *VM) periodicallyPruneMempool(frequency time.Duration) { return case <-ticker.C: if err := vm.pruneMempool(); err != nil { - vm.ctx.Log.Debug("pruning mempool failed", + vm.ctx.Log.Error("pruning mempool failed", zap.Error(err), ) } @@ -279,8 +281,7 @@ func (vm *VM) pruneMempool() error { for _, tx := range blockTxs { if err := vm.Builder.Add(tx); err != nil { - vm.ctx.Log.Debug( - "failed to reissue tx", + vm.ctx.Log.Error("failed to reissue tx", zap.Stringer("txID", tx.ID()), zap.Error(err), ) diff --git a/x/sync/manager.go b/x/sync/manager.go index 8a5075c9901e..3b5b21ad45e0 100644 --- a/x/sync/manager.go +++ b/x/sync/manager.go @@ -201,7 +201,9 @@ func (m *Manager) Start(ctx 
context.Context) error { return ErrAlreadyStarted } - m.config.Log.Info("starting sync", zap.Stringer("target root", m.config.TargetRoot)) + m.config.Log.Info("starting state sync", + zap.Stringer("target root", m.config.TargetRoot), + ) // Add work item to fetch the entire key range. // Note that this will be the first work item to be processed. @@ -373,7 +375,10 @@ func (m *Manager) requestChangeProof(ctx context.Context, work *workItem) { if err := m.handleChangeProofResponse(ctx, targetRootID, work, request, responseBytes, err); err != nil { // TODO log responses - m.config.Log.Debug("dropping response", zap.Error(err), zap.Stringer("request", request)) + m.config.Log.Debug("dropping response", + zap.Stringer("request", request), + zap.Error(err), + ) m.retryWork(work) return } @@ -431,7 +436,10 @@ func (m *Manager) requestRangeProof(ctx context.Context, work *workItem) { if err := m.handleRangeProofResponse(ctx, targetRootID, work, request, responseBytes, appErr); err != nil { // TODO log responses - m.config.Log.Debug("dropping response", zap.Error(err), zap.Stringer("request", request)) + m.config.Log.Debug("dropping response", + zap.Stringer("request", request), + zap.Error(err), + ) m.retryWork(work) return } @@ -858,7 +866,9 @@ func (m *Manager) Wait(ctx context.Context) error { return fmt.Errorf("%w: expected %s, got %s", ErrFinishedWithUnexpectedRoot, targetRootID, root) } - m.config.Log.Info("completed", zap.Stringer("root", root)) + m.config.Log.Info("state sync completed", + zap.Stringer("root", root), + ) return nil } @@ -880,7 +890,9 @@ func (m *Manager) UpdateSyncTarget(syncTargetRoot ids.ID) error { return nil } - m.config.Log.Debug("updated sync target", zap.Stringer("target", syncTargetRoot)) + m.config.Log.Debug("updated sync target", + zap.Stringer("target", syncTargetRoot), + ) m.config.TargetRoot = syncTargetRoot // move all completed ranges into the work heap with high priority @@ -913,7 +925,9 @@ func (m *Manager) setError(err error) { m.errLock.Lock() defer m.errLock.Unlock() - m.config.Log.Error("sync errored", zap.Error(err)) + m.config.Log.Error("sync errored", + zap.Error(err), + ) m.fatalError = err // Call in goroutine because we might be holding [m.workLock] // which [m.Close] will try to acquire.
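Most of the hunks above apply one formatting convention to structured logging calls: keep the message on the call line and give each field its own line, while also adjusting levels (for example, `Debug` drops to `Trace` for malformed peer messages, and "should never happen" paths move up to `Error`). Below is a minimal sketch of that call shape, using `go.uber.org/zap` directly rather than AvalancheGo's `logging.Logger` wrapper; the logger construction and field values are illustrative only.

```go
package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	// Illustrative logger setup; AvalancheGo builds its own wrapper around zap.
	log, _ := zap.NewDevelopment()
	defer log.Sync()

	nodeID := "NodeID-111111111111111111116DBWJs" // placeholder value
	requestID := uint32(7)

	// Message on the call line, one zap field per line, trailing comma on each field.
	log.Debug("dropping message",
		zap.String("reason", "closed queue"),
		zap.String("nodeID", nodeID),
		zap.Uint32("requestID", requestID),
		zap.Error(errors.New("example error")),
	)
}
```

Keeping one field per line makes later diffs smaller: fields can be added or removed without reflowing the whole call.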
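The `upgrade/upgrade.go` hunk schedules Fortuna at 15:00 UTC on April 8, 2025 for Mainnet and 15:00 UTC on March 13, 2025 for Fuji, and `utils/constants/acps.go` adds ACP-176 to `ScheduledACPs`. As a rough sketch of what a scheduled activation time means (the helper below is hypothetical and not the `upgrade` package's API), an upgrade is treated as active once the clock is at or past its activation time.

```go
package main

import (
	"fmt"
	"time"
)

// isActivated is a hypothetical helper: an upgrade is considered active once
// the current time reaches its scheduled activation time.
func isActivated(activationTime, now time.Time) bool {
	return !now.Before(activationTime)
}

func main() {
	// Fortuna activation times taken from the upgrade.go hunk above.
	mainnetFortuna := time.Date(2025, time.April, 8, 15, 0, 0, 0, time.UTC)
	fujiFortuna := time.Date(2025, time.March, 13, 15, 0, 0, 0, time.UTC)

	now := time.Date(2025, time.April, 1, 0, 0, 0, 0, time.UTC)
	fmt.Println(isActivated(fujiFortuna, now))    // true: Fuji activated in March
	fmt.Println(isActivated(mainnetFortuna, now)) // false: Mainnet activates April 8
}
```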
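`version/constants.go` bumps the node to v1.13.0, raises `MinimumCompatibleVersion` to v1.13.0 with v1.12.0 as the previous minimum, and `version/compatibility.json` keeps plugin version `39` valid for both v1.12.2 and v1.13.0. The sketch below shows only the ordering such a minimum implies, with a simplified semantic-version type rather than the project's `version` package.

```go
package main

import "fmt"

// semver is a simplified semantic version used only for this sketch.
type semver struct{ major, minor, patch int }

// atLeast reports whether v is greater than or equal to minVer.
func atLeast(v, minVer semver) bool {
	if v.major != minVer.major {
		return v.major > minVer.major
	}
	if v.minor != minVer.minor {
		return v.minor > minVer.minor
	}
	return v.patch >= minVer.patch
}

func main() {
	minimum := semver{1, 13, 0} // MinimumCompatibleVersion after this diff
	fmt.Println(atLeast(semver{1, 13, 0}, minimum)) // true
	fmt.Println(atLeast(semver{1, 12, 2}, minimum)) // false: below the new minimum
}
```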