@@ -187,15 +187,65 @@ proc sendWantBlock(
 proc refreshBlockKnowledge(
     self: BlockExcEngine, peer: BlockExcPeerCtx
 ) {.async: (raises: [CancelledError]).} =
-  if self.pendingBlocks.wantListLen > 0:
-    # We send only blocks that the peer hasn't already told us that they already have.
-    let
-      peerHave = peer.peerHave
-      toAsk = self.pendingBlocks.wantList.toSeq.filterIt(it notin peerHave)
+  if self.pendingBlocks.wantListLen == 0:
+    if peer.lastSentWants.len > 0:
+      trace "Clearing want list tracking, no pending blocks", peer = peer.id
+      peer.lastSentWants.clear()
+    return
+
+  # We send only blocks that the peer hasn't already told us that they already have.
+  let
+    peerHave = peer.peerHave
+    toAsk = toHashSet(self.pendingBlocks.wantList.toSeq.filterIt(it notin peerHave))
+
+  if toAsk.len == 0:
+    if peer.lastSentWants.len > 0:
+      trace "Clearing want list tracking, peer has all blocks", peer = peer.id
+      peer.lastSentWants.clear()
+    return
+
+  let newWants = toAsk - peer.lastSentWants
+
+  if peer.lastSentWants.len > 0:
+    if newWants.len > 0:
+      trace "Sending delta want list update",
+        peer = peer.id, newWants = newWants.len, totalWants = toAsk.len
+
+      let newWantsSeq = newWants.toSeq
+      var offset = 0
+      while offset < newWantsSeq.len:
+        let batchEnd = min(offset + MaxWantListBatchSize, newWantsSeq.len)
+        let batch = newWantsSeq[offset ..< batchEnd]
+
+        trace "Sending want list batch",
+          peer = peer.id,
+          batchSize = batch.len,
+          offset = offset,
+          total = newWantsSeq.len
+
+        await self.network.request.sendWantList(peer.id, batch, full = false)
+        offset = batchEnd
+    else:
+      trace "No changes in want list, skipping send", peer = peer.id
+
+    peer.lastSentWants = toAsk
+  else:
+    trace "Sending full want list",
+      peer = peer.id, length = toAsk.len, reason = "first_send"
+
+    let toAskSeq = toAsk.toSeq
+    var offset = 0
+    while offset < toAskSeq.len:
+      let batchEnd = min(offset + MaxWantListBatchSize, toAskSeq.len)
+      let batch = toAskSeq[offset ..< batchEnd]
+
+      trace "Sending full want list batch",
+        peer = peer.id, batchSize = batch.len, offset = offset, total = toAskSeq.len
+
+      await self.network.request.sendWantList(peer.id, batch, full = (offset == 0))
+      offset = batchEnd
 
-  if toAsk.len > 0:
-    trace "Sending want list to a peer", peer = peer.id, length = toAsk.len
-    await self.network.request.sendWantList(peer.id, toAsk, full = true)
+    peer.lastSentWants = toAsk
 
 proc refreshBlockKnowledge(self: BlockExcEngine) {.async: (raises: [CancelledError]).} =
   let runtimeQuota = 10.milliseconds
@@ -215,9 +265,6 @@ proc refreshBlockKnowledge(self: BlockExcEngine) {.async: (raises: [CancelledErr
     if peer.isKnowledgeStale or peer.lastRefresh < self.pendingBlocks.lastInclusion:
       if not peer.refreshInProgress:
         peer.refreshRequested()
-        # TODO: optimize this by keeping track of what was sent and sending deltas.
-        # This should allow us to run much more frequent refreshes, and be way more
-        # efficient about it.
         await self.refreshBlockKnowledge(peer)
     else:
       trace "Not refreshing: peer is up to date", peer = peer.id
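
As an aside, below is a minimal, self-contained sketch of the delta-plus-batching idea the diff implements. The planWantBatches helper, the plain string ids, and the MaxWantListBatchSize value of 1024 are placeholders for illustration only; the real constant and the block address type are defined elsewhere in the codebase.

import std/[sets, sequtils]

# Assumed batch size for illustration; the real constant lives elsewhere.
const MaxWantListBatchSize = 1024

# Compute the wants that were not announced in the previous send and
# split them into batches of at most MaxWantListBatchSize entries.
proc planWantBatches(current, lastSent: HashSet[string]): seq[seq[string]] =
  let newWants = (current - lastSent).toSeq
  var offset = 0
  while offset < newWants.len:
    let batchEnd = min(offset + MaxWantListBatchSize, newWants.len)
    result.add newWants[offset ..< batchEnd]
    offset = batchEnd

when isMainModule:
  let
    lastSent = ["a", "b"].toHashSet
    current = ["a", "b", "c", "d"].toHashSet
  # Only the delta ("c" and "d") would be sent, with full = false.
  echo planWantBatches(current, lastSent)

Remembering the previously announced set is what lets refreshBlockKnowledge send only the delta with full = false on subsequent refreshes, instead of retransmitting the entire want list every time.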