diff --git a/.gitmodules b/.gitmodules index 45f88f8e32..4477ae4814 100644 --- a/.gitmodules +++ b/.gitmodules @@ -42,9 +42,9 @@ branch = devel [submodule "vendor/nim-web3"] path = vendor/nim-web3 - url = https://github.com/status-im/nim-web3.git + url = https://github.com/RazorClient/nim-web3.git ignore = untracked - branch = master + branch = Focil-upstream [submodule "vendor/nim-nat-traversal"] path = vendor/nim-nat-traversal url = https://github.com/status-im/nim-nat-traversal.git diff --git a/beacon_chain/beacon_node.nim b/beacon_chain/beacon_node.nim index 04d67e1505..aa86b73760 100644 --- a/beacon_chain/beacon_node.nim +++ b/beacon_chain/beacon_node.nim @@ -86,6 +86,7 @@ type syncCommitteeMsgPool*: ref SyncCommitteeMsgPool lightClientPool*: ref LightClientPool validatorChangePool*: ref ValidatorChangePool + inclusionListStore*: ref InclusionListStore elManager*: ELManager restServer*: RestServerRef keymanagerHost*: ref KeymanagerHost diff --git a/beacon_chain/consensus_object_pools/attestation_pool.nim b/beacon_chain/consensus_object_pools/attestation_pool.nim index af9c5e3887..1cc6a74d96 100644 --- a/beacon_chain/consensus_object_pools/attestation_pool.nim +++ b/beacon_chain/consensus_object_pools/attestation_pool.nim @@ -13,7 +13,7 @@ import chronicles, stew/byteutils, # Internal ../spec/[ - beaconstate, eth2_merkleization, forks, state_transition_epoch, validator], + beaconstate, eth2_merkleization, forks, state_transition_epoch, validator,datatypes/focil], "."/[spec_cache, blockchain_dag, block_quarantine], ../fork_choice/fork_choice, ../beacon_clock @@ -208,6 +208,16 @@ proc addForkChoiceVotes( # hopefully the fork choice will heal itself over time. error "Couldn't add attestation to fork choice, bug?", err = v.error() +proc onInclusionList*(pool: var AttestationPool, + inclusionList: focil.SignedInclusionList, + wallTime: BeaconTime) = + let res = pool.forkChoice.on_inclusion_list(pool.dag, inclusionList, wallTime) + if res.isErr(): + warn "Couldn't add inclusion list to fork choice", + validator_index = inclusionList.message.validator_index, + slot = inclusionList.message.slot, + err = res.error() + func candidateIdx( pool: AttestationPool, slot: Slot, candidateIdxType: CandidateIdxType): Opt[int] = diff --git a/beacon_chain/consensus_object_pools/inclusion_list_pool.nim b/beacon_chain/consensus_object_pools/inclusion_list_pool.nim new file mode 100644 index 0000000000..f43c497244 --- /dev/null +++ b/beacon_chain/consensus_object_pools/inclusion_list_pool.nim @@ -0,0 +1,87 @@ +# beacon_chain +# Copyright (c) 2024-2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +{.push raises: [].} + +import + # Standard libraries + std/[deques, sets], + # Internal + ../spec/datatypes/[base, focil], + ../spec/[helpers, state_transition_block], + "."/[blockchain_dag] + +export base, deques, blockchain_dag, focil + +const + INCLUSION_LISTS_BOUND = 1024'u64 # Reasonable bound for inclusion lists + +type + OnInclusionListCallback = + proc(data: SignedInclusionList) {.gcsafe, raises: [].} + + InclusionListPool* = object + ## The inclusion list pool tracks signed inclusion lists that could be + ## added to a proposed block. 
+ + inclusion_lists*: Deque[SignedInclusionList] ## \ + ## Not a function of chain DAG branch; just used as a FIFO queue for blocks + + prior_seen_inclusion_list_validators: HashSet[uint64] ## \ + ## Records validator indices that have already submitted inclusion lists + ## to prevent duplicate processing + + dag*: ChainDAGRef + onInclusionListReceived*: OnInclusionListCallback + +func init*(T: type InclusionListPool, dag: ChainDAGRef, + onInclusionList: OnInclusionListCallback = nil): T = + ## Initialize an InclusionListPool from the dag `headState` + T( + inclusion_lists: + initDeque[SignedInclusionList](initialSize = INCLUSION_LISTS_BOUND.int), + dag: dag, + onInclusionListReceived: onInclusionList) + +func addInclusionListMessage( + subpool: var Deque[SignedInclusionList], + seenpool: var HashSet[uint64], + inclusionList: SignedInclusionList, + bound: static[uint64]) = + ## Add an inclusion list message to the pool, maintaining bounds + while subpool.lenu64 >= bound: + seenpool.excl subpool.popFirst().message.validator_index.uint64 + + subpool.addLast(inclusionList) + doAssert subpool.lenu64 <= bound + +func isSeen*(pool: InclusionListPool, msg: SignedInclusionList): bool = + ## Check if we've already seen an inclusion list from this validator + msg.message.validator_index.uint64 in pool.prior_seen_inclusion_list_validators + +proc addMessage*(pool: var InclusionListPool, msg: SignedInclusionList) = + ## Add an inclusion list message to the pool + pool.prior_seen_inclusion_list_validators.incl( + msg.message.validator_index.uint64) + + addInclusionListMessage( + pool.inclusion_lists, pool.prior_seen_inclusion_list_validators, msg, INCLUSION_LISTS_BOUND) + + # Send notification about new inclusion list via callback + if not(isNil(pool.onInclusionListReceived)): + pool.onInclusionListReceived(msg) + +func getInclusionLists*(pool: InclusionListPool): seq[SignedInclusionList] = + ## Get all inclusion lists in the pool + result = newSeq[SignedInclusionList](pool.inclusion_lists.len) + for i, inclusionList in pool.inclusion_lists: + result[i] = inclusionList + +func clear*(pool: var InclusionListPool) = + ## Clear all inclusion lists from the pool + pool.inclusion_lists.clear() + pool.prior_seen_inclusion_list_validators.clear() \ No newline at end of file diff --git a/beacon_chain/el/el_manager.nim b/beacon_chain/el/el_manager.nim index a11994b084..2868191540 100644 --- a/beacon_chain/el/el_manager.nim +++ b/beacon_chain/el/el_manager.nim @@ -27,6 +27,7 @@ from std/times import getTime, inSeconds, initTime, `-` from ../spec/engine_authentication import getSignedIatToken from ../spec/helpers import bytes_to_uint64 from ../spec/state_transition_block import kzg_commitment_to_versioned_hash +from json_rpc/router import METHOD_NOT_FOUND export eth1_chain, el_conf, engine_api, base @@ -204,6 +205,9 @@ type lastPayloadId: Opt[Bytes8] + supportsInclusionListFetch: Opt[bool] + supportsInclusionListUpdate: Opt[bool] + FullBlockId* = object number: Eth1BlockNumber hash: Hash32 @@ -463,6 +467,55 @@ proc connectedRpcClient(connection: ELConnection): Future[RpcClient] {. 
connection.web3.get.provider +proc getInclusionListFromSingleEL( + connection: ELConnection, + parentHash: Eth2Digest +): Future[Opt[seq[bellatrix.Transaction]]] {.async: (raises: [CatchableError]).} = + if connection.supportsInclusionListFetch.isSome and + not connection.supportsInclusionListFetch.get: + return Opt.none(seq[bellatrix.Transaction]) + + let rpcClient = await connection.connectedRpcClient() + + try: + let response = + await rpcClient.engine_getInclusionListV1(parentHash.asBlockHash) + connection.supportsInclusionListFetch = Opt.some(true) + return Opt.some(response.toConsensusTransactions()) + except ErrorResponse as exc: + if exc.status == METHOD_NOT_FOUND: + if connection.supportsInclusionListFetch.isNone: + trace "Execution client does not support engine_getInclusionListV1", + url = connection.engineUrl.url + connection.supportsInclusionListFetch = Opt.some(false) + return Opt.none(seq[bellatrix.Transaction]) + raise exc + +proc updatePayloadInclusionListForSingleEL( + connection: ELConnection, + payloadId: Bytes8, + inclusionList: InclusionList +): Future[bool] {.async: (raises: [CatchableError]).} = + if connection.supportsInclusionListUpdate.isSome and + not connection.supportsInclusionListUpdate.get: + return false + + let rpcClient = await connection.connectedRpcClient() + + try: + discard await rpcClient.engine_updatePayloadWithinInclusionListV1( + payloadId, inclusionList) + connection.supportsInclusionListUpdate = Opt.some(true) + return true + except ErrorResponse as exc: + if exc.status == METHOD_NOT_FOUND: + if connection.supportsInclusionListUpdate.isNone: + trace "Execution client does not support engine_updatePayloadWithinInclusionListV1", + url = connection.engineUrl.url + connection.supportsInclusionListUpdate = Opt.some(false) + return false + raise exc + proc getBlockByHash( rpcClient: RpcClient, hash: Hash32 @@ -1574,7 +1627,10 @@ template getBlockProposalData*(m: ELManager, func new*(T: type ELConnection, engineUrl: EngineApiUrl): T = ELConnection( engineUrl: engineUrl, - depositContractSyncStatus: DepositContractSyncStatus.unknown) + depositContractSyncStatus: DepositContractSyncStatus.unknown, + lastPayloadId: Opt[Bytes8].none, + supportsInclusionListFetch: Opt[bool].none, + supportsInclusionListUpdate: Opt[bool].none) proc new*(T: type ELManager, cfg: RuntimeConfig, diff --git a/beacon_chain/el/engine_api_conversions.nim b/beacon_chain/el/engine_api_conversions.nim index 39d5bc51f9..33ea8c01ae 100644 --- a/beacon_chain/el/engine_api_conversions.nim +++ b/beacon_chain/el/engine_api_conversions.nim @@ -9,7 +9,7 @@ import kzg4844/[kzg_abi, kzg], - ../spec/datatypes/[bellatrix, capella, deneb, electra, fulu], + ../spec/datatypes/[bellatrix, capella, deneb, electra, fulu,focil], web3/[engine_api, engine_api_types] from std/sequtils import mapIt @@ -156,6 +156,22 @@ func asElectraConsensusPayload(rpcExecutionPayload: ExecutionPayloadV3): blob_gas_used: rpcExecutionPayload.blobGasUsed.uint64, excess_blob_gas: rpcExecutionPayload.excessBlobGas.uint64) +func toConsensusTransactions*(inclusionList: InclusionList): + seq[bellatrix.Transaction] = + ## Convert Engine API inclusion list transactions to consensus transactions. + let txs = inclusionList.distinctBase + result = newSeqOfCap[bellatrix.Transaction](txs.len) + for tx in txs: + result.add bellatrix.Transaction.init(tx.distinctBase) + +func toEngineInclusionList*(txs: seq[bellatrix.Transaction]): InclusionList = + ## Convert consensus inclusion list transactions to Engine API representation. 
+ var engineTxs = newSeqOfCap[TypedTransaction](txs.len) + for tx in txs: + engineTxs.add TypedTransaction(tx.asSeq) + InclusionList(engineTxs) + + func asFuluConsensusPayload(rpcExecutionPayload: ExecutionPayloadV3): fulu.ExecutionPayload = template getTransaction(tt: TypedTransaction): bellatrix.Transaction = diff --git a/beacon_chain/fork_choice/fork_choice.nim b/beacon_chain/fork_choice/fork_choice.nim index 338e6fdcbc..b9ef6edb90 100644 --- a/beacon_chain/fork_choice/fork_choice.nim +++ b/beacon_chain/fork_choice/fork_choice.nim @@ -14,7 +14,7 @@ import results, chronicles, # Internal ../spec/[beaconstate, helpers, state_transition_block], - ../spec/datatypes/[phase0, altair, bellatrix], + ../spec/datatypes/[phase0, altair, bellatrix,focil], # Fork choice ./fork_choice_types, ./proto_array, ../consensus_object_pools/[spec_cache, blockchain_dag] @@ -47,6 +47,19 @@ func compute_deltas( logScope: topics = "fork_choice" +template blockBody*(blk: untyped): untyped = + when compiles(blk.body): + blk.body + else: + blk.message.body + +proc extractBlockTransactions(blk: auto): seq[bellatrix.Transaction] = + withBlck(blk): + when consensusFork >= ConsensusFork.Bellatrix: + blockBody(forkyBlck).execution_payload.transactions.asSeq + else: + @[] + func init*( T: type ForkChoiceBackend, checkpoints: FinalityCheckpoints): T = T(proto_array: ProtoArray.init(checkpoints)) @@ -71,7 +84,12 @@ proc init*( total_active_balance: epochRef.total_active_balance, balances: epochRef.effective_balances), finalized: checkpoint, - best_justified: checkpoint)) + best_justified: checkpoint), + queuedAttestations: @[], + inclusionLists: initTable[(Slot, Eth2Digest), Table[ValidatorIndex, InclusionList]](), + inclusionListEquivocators: initTable[(Slot, Eth2Digest), HashSet[ValidatorIndex]](), + unsatisfiedInclusionListBlocks: initHashSet[Eth2Digest](), + inclusionListBlocks: initTable[(Slot, Eth2Digest), HashSet[Eth2Digest]]()) func extend[T](s: var seq[T], minLen: int) = ## Extend a sequence so that it can contains at least `minLen` elements. 
@@ -80,6 +98,138 @@ func extend[T](s: var seq[T], minLen: int) =
   if s.len < minLen:
     s.setLen(minLen)

+# Forward declaration - the implementation lives further down in this module.
+func mark_root_invalid*(self: var ForkChoice, root: Eth2Digest)
+
+proc collectRequiredTransactions(
+    self: var ForkChoice, key: (Slot, Eth2Digest)): seq[bellatrix.Transaction] =
+  # `withValue` needs a mutable table, hence the `var` parameter.
+  self.inclusionLists.withValue(key, lists):
+    var aggregated: seq[bellatrix.Transaction]
+    var equivocators = initHashSet[ValidatorIndex]()
+    self.inclusionListEquivocators.withValue(key, eq):
+      equivocators = eq[]
+    for validator, inclusionList in lists[]:
+      if validator in equivocators:
+        continue
+      for tx in inclusionList.transactions.items:
+        if aggregated.allIt(it != tx):
+          aggregated.add(tx)
+    return aggregated
+  @[]
+
+proc applyInclusionStatus(
+    self: var ForkChoice,
+    key: (Slot, Eth2Digest),
+    blckRef: BlockRef,
+    payloadTxs: seq[bellatrix.Transaction]) =
+  let requiredTxs = self.collectRequiredTransactions(key)
+  if requiredTxs.len == 0:
+    if self.unsatisfiedInclusionListBlocks.contains(blckRef.root):
+      debug "Block satisfies inclusion list requirements (no pending transactions)",
+        blck = shortLog(blckRef), slot = key.slot
+      self.unsatisfiedInclusionListBlocks.excl(blckRef.root)
+    return
+
+  var missing = false
+  for tx in requiredTxs:
+    if payloadTxs.allIt(it != tx):
+      missing = true
+      break
+
+  if missing:
+    if not self.unsatisfiedInclusionListBlocks.contains(blckRef.root):
+      notice "Marking block as missing inclusion list transactions",
+        blck = shortLog(blckRef), slot = key.slot
+      self.unsatisfiedInclusionListBlocks.incl(blckRef.root)
+      self.mark_root_invalid(blckRef.root)
+  else:
+    if self.unsatisfiedInclusionListBlocks.contains(blckRef.root):
+      debug "Block now satisfies inclusion list transactions",
+        blck = shortLog(blckRef), slot = key.slot
+      self.unsatisfiedInclusionListBlocks.excl(blckRef.root)
+
+proc updateBlockInclusionState(
+    self: var ForkChoice,
+    dag: ChainDAGRef,
+    key: (Slot, Eth2Digest),
+    blockRoot: Eth2Digest) =
+  let blckRef = dag.getBlockRef(blockRoot).valueOr:
+    return
+  let blockData = dag.getForkedBlock(blckRef.bid).valueOr:
+    return
+  let transactions = extractBlockTransactions(blockData)
+  self.applyInclusionStatus(key, blckRef, transactions)
+
+proc computeCommitteeRootForBlock(
+    dag: ChainDAGRef, blckRef: BlockRef, committeeRoot: var Eth2Digest): bool =
+  var state = ForkedHashedBeaconState()
+  let bsi = BlockSlotId.init(blckRef.bid, blckRef.slot)
+  if not dag.getState(bsi, state):
+    trace "Unable to load block state for inclusion list tracking",
+      blck = shortLog(blckRef)
+    return false
+
+  withState(state):
+    let committee = resolve_inclusion_list_committee(forkyState.data, blckRef.slot)
+    committeeRoot = compute_inclusion_list_committee_root(committee)
+
+  true
+
+proc registerBlockInclusion(
+    self: var ForkChoice,
+    dag: ChainDAGRef,
+    blckRef: BlockRef,
+    blck: ForkyTrustedBeaconBlock) =
+  if dag.cfg.consensusForkAtEpoch(blckRef.slot.epoch) < ConsensusFork.Fulu:
+    return
+
+  var committeeRoot: Eth2Digest
+  if not computeCommitteeRootForBlock(dag, blckRef, committeeRoot):
+    return
+
+  let key = (blckRef.slot, committeeRoot)
+  # Update the stored set in place - copying it into a local would lose the
+  # insertion.
+  self.inclusionListBlocks
+    .mgetOrPut(key, initHashSet[Eth2Digest]())
+    .incl(blckRef.root)
+
+  let transactions = extractBlockTransactions(blck)
+  self.applyInclusionStatus(key, blckRef, transactions)
+
+proc on_inclusion_list*(
+    self: var ForkChoice,
+    dag: ChainDAGRef,
+    inclusionList: SignedInclusionList,
+    wallTime: BeaconTime): FcResult[void] =
+  let slot = inclusionList.message.slot
+  if dag.cfg.consensusForkAtEpoch(slot.epoch) < ConsensusFork.Fulu:
+    return ok()
+
+  if wallTime > inclusion_list_view_freeze(slot):
+    trace "Ignoring late inclusion list",
+      slot, validator_index = inclusionList.message.validator_index
+    return ok()
+
+  let key = (slot, inclusionList.message.inclusion_list_committee_root)
+  let validator = ValidatorIndex(inclusionList.message.validator_index)
+
+  # Operate on the stored table in place - assigning `mgetOrPut` to a local
+  # `var` would update a copy and drop the changes.
+  template entries: untyped =
+    self.inclusionLists.mgetOrPut(key, initTable[ValidatorIndex, InclusionList]())
+
+  if entries.hasKey(validator):
+    if entries[validator] == inclusionList.message:
+      return ok()
+
+    entries.del(validator)
+    self.inclusionListEquivocators
+      .mgetOrPut(key, initHashSet[ValidatorIndex]())
+      .incl(validator)
+    trace "Inclusion list equivocation detected",
+      slot, validator_index = inclusionList.message.validator_index
+  else:
+    entries[validator] = inclusionList.message
+
+  if self.inclusionListBlocks.contains(key):
+    for blockRoot in self.inclusionListBlocks[key].items:
+      self.updateBlockInclusionState(dag, key, blockRoot)
+
+  ok()
+
 proc update_justified(
     self: var Checkpoints, dag: ChainDAGRef, blck: BlockRef, epoch: Epoch) =
   let
@@ -286,6 +436,8 @@ proc process_block*(self: var ForkChoice,
           attestation.data.beacon_block_root,
           attestation.data.target.epoch)

+  self.registerBlockInclusion(dag, blckRef, blck)
+
   trace "Integrating block in fork choice",
     block_root = shortLog(blckRef)

@@ -389,6 +541,33 @@ func prune*(self: var ForkChoice): FcResult[void] =
   ? self.backend.prune(
     FinalityCheckpoints(
       justified: self.checkpoints.justified.checkpoint,
       finalized: self.checkpoints.finalized))

+  # Remove stale inclusion-list entries and rebuild the unsatisfied set after
+  # pruning.
+  let finalizedSlot = self.checkpoints.finalized.epoch.start_slot
+
+  var keysToRemove: seq[(Slot, Eth2Digest)]
+  for key in self.inclusionLists.keys:
+    if key.slot < finalizedSlot:
+      keysToRemove.add(key)
+
+  for key in keysToRemove:
+    self.inclusionLists.del(key)
+    self.inclusionListEquivocators.del(key)
+    self.inclusionListBlocks.del(key)
+
+  var stillValid = initHashSet[Eth2Digest]()
+  for root in self.unsatisfiedInclusionListBlocks.items:
+    if self.backend.proto_array.indices.contains(root):
+      stillValid.incl(root)
+  self.unsatisfiedInclusionListBlocks = stillValid
+
+  ok()
+
 func mark_root_invalid*(self: var ForkChoice, root: Eth2Digest) =
   try:
     let nodePhysicalIdx =
diff --git a/beacon_chain/fork_choice/fork_choice_types.nim b/beacon_chain/fork_choice/fork_choice_types.nim
index 743ec59958..8ac660ddad 100644
--- a/beacon_chain/fork_choice/fork_choice_types.nim
+++ b/beacon_chain/fork_choice/fork_choice_types.nim
@@ -142,6 +142,17 @@ type
     backend*: ForkChoiceBackend
     checkpoints*: Checkpoints
     queuedAttestations*: seq[QueuedAttestation]
+    # Actual inclusion lists keyed by (slot, committee root) and validator, so that every transaction required for a given slot/committee can be aggregated.
+    inclusionLists*: Table[(Slot, Eth2Digest), Table[ValidatorIndex, InclusionList]]
+    # Equivocation tracking for inclusion lists.
+    # This could be dropped, but equivocators would then have to be re-derived from the inclusion lists on restart.
+    inclusionListEquivocators*: Table[(Slot, Eth2Digest), HashSet[ValidatorIndex]]
+    # Blocks that do not yet satisfy the inclusion-list condition; proto-array invalid bits are flipped from here so fork choice avoids those roots until they comply.
+ unsatisfiedInclusionListBlocks*: HashSet[Eth2Digest] + # we record which inclusion-list committee governs its payload by hashing the committee. + # Later, if a late inclusion list arrives, we look up that key and re- check only those blocks, instead of scanning the entire proto-array. + # Table from (slot, committeeRoot) to set of block roots. + inclusionListBlocks*: Table[(Slot, Eth2Digest), HashSet[Eth2Digest]] func shortLog*(vote: VoteTracker): auto = ( diff --git a/beacon_chain/gossip_processing/batch_validation.nim b/beacon_chain/gossip_processing/batch_validation.nim index bd700996da..78233ef2d9 100644 --- a/beacon_chain/gossip_processing/batch_validation.nim +++ b/beacon_chain/gossip_processing/batch_validation.nim @@ -14,7 +14,8 @@ import # Status chronicles, chronos, chronos/threadsync, ../spec/signatures_batch, - ../consensus_object_pools/[blockchain_dag, spec_cache] + ../consensus_object_pools/[blockchain_dag, spec_cache], + ../spec/datatypes/focil export signatures_batch, blockchain_dag @@ -577,3 +578,28 @@ proc scheduleBlsToExecutionChangeCheck*( pubkey, sig) ok((fut, sig)) + +proc scheduleInclusionListCheck*( + batchCrypto: ref BatchCrypto, + fork: Fork, + message: InclusionList, + pubkey: CookedPubKey, + signature: ValidatorSig): + Result[tuple[fut: FutureBatchResult, sig: CookedSig], cstring] = + ## Schedule crypto verification of an inclusion list signature + ## + ## The buffer is processed: + ## - when eager processing is enabled and the batch is full + ## - otherwise after 10ms (BatchAttAccumTime) + ## + ## This returns an error if crypto sanity checks failed + ## and a future with the deferred check otherwise. + + let + sig = signature.load().valueOr: + return err("InclusionList: cannot load signature") + fut = batchCrypto.verifySoon("scheduleInclusionListCheck"): + inclusion_list_signature_set( + fork, batchCrypto[].genesis_validators_root, message, pubkey, sig) + + ok((fut, sig)) diff --git a/beacon_chain/gossip_processing/eth2_processor.nim b/beacon_chain/gossip_processing/eth2_processor.nim index 3d1382c4de..31ab0885f2 100644 --- a/beacon_chain/gossip_processing/eth2_processor.nim +++ b/beacon_chain/gossip_processing/eth2_processor.nim @@ -667,7 +667,36 @@ proc processSignedContributionAndProof*( beacon_sync_committee_contributions_dropped.inc(1, [$v.error[0]]) err(v.error()) + +# https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.2/specs/_features/eip7805/p2p-interface.md#global-topics +proc processInclusionList*( + self: ref Eth2Processor, src: MsgSource, + inclusionList: SignedInclusionList, + checkSignature: bool = true): Future[ValidationRes] + {.async: (raises: [CancelledError]).} = + let wallTime = self.getCurrentBeaconTime() + + logScope: + validator_index = inclusionList.message.validator_index + slot = inclusionList.message.slot + wallSlot = wallTime.slotOrZero + let res = await validateInclusionList( + self.inclusionListPool[], self.dag, self.batchCrypto, + inclusionList, wallTime, checkSignature) + + return if res.isOk(): + beacon_inclusion_lists_received.inc() + self.attestationPool[].onInclusionList(inclusionList, wallTime) + ok() + else: + let errVal = res.error() + debug "Dropping inclusion list", + validator_index = inclusionList.message.validator_index, + slot = inclusionList.message.slot, + reason = $errVal + beacon_inclusion_lists_dropped.inc(1, [$errVal[0]]) + err(errVal) # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#process_light_client_finality_update proc 
processLightClientFinalityUpdate*( self: var Eth2Processor, src: MsgSource, diff --git a/beacon_chain/gossip_processing/gossip_validation.nim b/beacon_chain/gossip_processing/gossip_validation.nim index e0f3e7e851..2bd25fe615 100644 --- a/beacon_chain/gossip_processing/gossip_validation.nim +++ b/beacon_chain/gossip_processing/gossip_validation.nim @@ -13,14 +13,15 @@ import results, kzg4844/[kzg, kzg_abi], stew/byteutils, + ssz_serialization/types as sszTypes, # Internals ../spec/[ - beaconstate, state_transition_block, forks, - helpers, network, signatures, peerdas_helpers], + beaconstate, state_transition_block, forks, datatypes/focil, + helpers, network, signatures, peerdas_helpers, focil_helpers], ../consensus_object_pools/[ attestation_pool, blockchain_dag, blob_quarantine, block_quarantine, data_column_quarantine, spec_cache, light_client_pool, sync_committee_msg_pool, - validator_change_pool], + validator_change_pool, inclusion_list_pool], ".."/[beacon_clock], ./batch_validation @@ -1893,3 +1894,98 @@ proc validateLightClientOptimisticUpdate*( pool.latestForwardedOptimisticSlot = attested_slot ok() + +# https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7805/p2p-interface.md#global-topics +proc validateInclusionList*( + pool: var InclusionListPool, dag: ChainDAGRef, + batchCrypto: ref BatchCrypto, + signed_inclusion_list: SignedInclusionList, + wallTime: BeaconTime, checkSignature: bool): + Future[Result[CookedSig, ValidationError]] {.async: (raises: [CancelledError]).} = + ## Validate a signed inclusion list according to the EIP-7805 specification + + template message: untyped = signed_inclusion_list.message + + if dag.cfg.consensusForkAtEpoch(message.slot.epoch) < ConsensusFork.Fulu: + return dag.checkedReject("InclusionList: received before Fulu fork") + + # [REJECT] The size of message.transactions is within upperbound MAX_BYTES_PER_INCLUSION_LIST. + var totalSize: uint64 = 0 + for transaction in message.transactions: + totalSize += uint64(transaction.len) + if totalSize > MAX_BYTES_PER_INCLUSION_LIST: + return dag.checkedReject("InclusionList: transactions size exceeds MAX_BYTES_PER_INCLUSION_LIST") + + # [REJECT] The slot message.slot is equal to the previous or current slot. + let currentSlot = wallTime.slotOrZero + if not (message.slot == currentSlot or message.slot == currentSlot - 1): + return dag.checkedReject("InclusionList: slot must be current or previous slot") + + # [IGNORE] The slot message.slot is equal to the current slot, or it is equal to the previous slot and the current time is less than ATTESTATION_DEADLINE seconds into the slot. + if message.slot == currentSlot - 1: + let slotStartTime = message.slot.start_beacon_time() + let currentTime = wallTime + if currentTime >= slotStartTime + ATTESTATION_DEADLINE: + return errIgnore("InclusionList: previous slot inclusion list received after deadline") + + # [IGNORE] The inclusion_list_committee for slot message.slot on the current branch corresponds to message.inclusion_list_committee_root, as determined by hash_tree_root(inclusion_list_committee) == message.inclusion_list_committee_root. 
+ withState(dag.headState): + let committee = resolve_inclusion_list_committee(forkyState.data, message.slot) + # Note: We need to convert the HashSet to a sequence for hash_tree_root + var committeeList: List[uint64, Limit INCLUSION_LIST_COMMITTEE_SIZE] + for validator in committee: + if not committeeList.add(validator): + raiseAssert "Committee list overflowed its maximum size" + let committeeRoot = hash_tree_root(committeeList) + if committeeRoot != message.inclusion_list_committee_root: + return errIgnore("InclusionList: inclusion list committee root mismatch") + + # [REJECT] The validator index message.validator_index is within the inclusion_list_committee corresponding to message.inclusion_list_committee_root. + withState(dag.headState): + let committee = resolve_inclusion_list_committee(forkyState.data, message.slot) + if message.validator_index notin committee: + return dag.checkedReject("InclusionList: validator not in inclusion list committee") + + # [IGNORE] The message is either the first or second valid message received from the validator with index message.validator_index. + if pool.isSeen(signed_inclusion_list): + return errIgnore("InclusionList: already received inclusion list from this validator") + + # [REJECT] The signature of inclusion_list.signature is valid with respect to the validator index. + let sig = + if checkSignature: + withState(dag.headState): + let + pubkey = dag.validatorKey(message.validator_index).valueOr: + return dag.checkedReject("InclusionList: invalid validator index") + let deferredCrypto = batchCrypto.scheduleInclusionListCheck( + dag.forkAtEpoch(message.slot.epoch), + message, pubkey, signed_inclusion_list.signature) + if deferredCrypto.isErr(): + return dag.checkedReject(deferredCrypto.error) + + let (cryptoFut, sig) = deferredCrypto.get() + # Await the crypto check + let x = (await cryptoFut) + case x + of BatchResult.Invalid: + return dag.checkedReject("InclusionList: invalid signature") + of BatchResult.Timeout: + return errIgnore("InclusionList: timeout checking signature") + of BatchResult.Valid: + sig # keep going only in this case + else: + signed_inclusion_list.signature.load().valueOr: + return dag.checkedReject("InclusionList: unable to load signature") + + # Add the inclusion list to the pool + pool.addMessage(signed_inclusion_list) + + withState(dag.headState): + let store = get_inclusion_list_store() + process_inclusion_list( + store, + forkyState.data, + signed_inclusion_list, + wallTime) + + ok(sig) diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index 782a76244e..5943fe4856 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -416,6 +416,7 @@ proc initFullNode( blobQuarantine = newClone(BlobQuarantine.init( dag.cfg, onBlobSidecarAdded)) dataColumnQuarantine = newClone(DataColumnQuarantine.init()) + inclusionListStore = newClone(InclusionListStore.init()) supernode = node.config.peerdasSupernode localCustodyGroups = if supernode: @@ -2641,4 +2642,4 @@ programMain: else: handleStartUpCmd(config) else: - handleStartUpCmd(config) \ No newline at end of file + handleStartUpCmd(config) diff --git a/beacon_chain/spec/datatypes/constants.nim b/beacon_chain/spec/datatypes/constants.nim index 442d67c6ff..4298cc7e11 100644 --- a/beacon_chain/spec/datatypes/constants.nim +++ b/beacon_chain/spec/datatypes/constants.nim @@ -87,3 +87,6 @@ const DEPOSIT_REQUEST_TYPE* = 0x00'u8 WITHDRAWAL_REQUEST_TYPE* = 0x01'u8 CONSOLIDATION_REQUEST_TYPE* = 0x02'u8 + + # 
https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7805/p2p-interface.md#configuration + MAX_REQUEST_INCLUSION_LIST*: uint64 = 16 # 2**4 diff --git a/beacon_chain/spec/datatypes/focil.nim b/beacon_chain/spec/datatypes/focil.nim index 1f80a75a06..1b664137ef 100644 --- a/beacon_chain/spec/datatypes/focil.nim +++ b/beacon_chain/spec/datatypes/focil.nim @@ -43,17 +43,20 @@ export json_serialization, base const # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.2/specs/_features/eip7805/beacon-chain.md#domain-types - DOMAIN_INCLUSION_LIST_COMMITTEE* = DomainType([byte 0x0c, 0x00, 0x00, 0x00]) + DOMAIN_INCLUSION_LIST_COMMITTEE* = DomainType([byte 0x0C, 0x00, 0x00, 0x00]) + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.2/specs/_features/eip7805/beacon-chain.md#preset INCLUSION_LIST_COMMITTEE_SIZE* = 16'u64 - # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.2/specs/_features/eip7805/fork-choice.md#time-parameters - VIEW_FREEZE_DEADLINE* = (SECONDS_PER_SLOT * 2 div 3 + 1).seconds - # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.2/specs/_features/eip7805/p2p-interface.md#configuration - ATTESTATION_DEADLINE* = (SECONDS_PER_SLOT div 3).seconds + # https://github.com/ethereum/consensus-specs/blob/master/specs/_features/eip7805/fork-choice.md#configuration + VIEW_FREEZE_DEADLINE* = chronos.seconds (SECONDS_PER_SLOT * 3 div 4 ) + # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.2/specs/_features/eip7805/validator.md#configuration + INCLUSION_LIST_SUBMISSION_DUE = chronos.seconds (SECONDS_PER_SLOT * 2 div 3) + PROPOSER_INCLUSION_LIST_CUT_OFF = chronos.seconds (SECONDS_PER_SLOT - 1) + + MAX_REQUEST_INCLUSION_LIST* = 16'u64 MAX_BYTES_PER_INCLUSION_LIST* = 8192'u64 - # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.2/specs/_features/eip7805/validator.md#configuration - PROPOSER_INCLUSION_LIST_CUT_OFF = (SECONDS_PER_SLOT - 1).seconds + type # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.2/specs/_features/eip7805/beacon-chain.md#inclusionlist @@ -67,3 +70,116 @@ type SignedInclusionList* = object message*: InclusionList signature*: ValidatorSig + + InclusionListKey* = tuple[slot: Slot, committeeRoot: Eth2Digest] + + InclusionListStore* = object + ## Inclusion lists accepted prior to the view-freeze deadline. + inclusionLists*: Table[InclusionListKey, seq[InclusionList]] + ## Tracking of validators that equivocated for a particular (slot, root). 
+    equivocators*: Table[InclusionListKey, HashSet[ValidatorIndex]]
+
+template makeKey*(slot: Slot, root: Eth2Digest): InclusionListKey =
+  (slot: slot, committeeRoot: root)
+
+proc init*(T: typedesc[InclusionListStore]): T =
+  InclusionListStore(
+    inclusionLists: initTable[InclusionListKey, seq[InclusionList]](),
+    equivocators: initTable[InclusionListKey, HashSet[ValidatorIndex]](),
+  )
+
+template mgetOrPutSeq(tab: var Table[InclusionListKey, seq[InclusionList]],
+                      key: InclusionListKey): var seq[InclusionList] =
+  tab.mgetOrPut(key, @[])
+
+template mgetOrPutSet(tab: var Table[InclusionListKey, HashSet[ValidatorIndex]],
+                      key: InclusionListKey): var HashSet[ValidatorIndex] =
+  tab.mgetOrPut(key, initHashSet[ValidatorIndex]())
+
+proc markEquivocator(
+    store: var InclusionListStore, key: InclusionListKey,
+    validator: ValidatorIndex) =
+  store.equivocators.mgetOrPutSet(key).incl(validator)
+
+proc isKnownEquivocator(
+    store: InclusionListStore, key: InclusionListKey,
+    validator: ValidatorIndex): bool =
+  # `withValue` requires a mutable table; use `getOrDefault` for read access.
+  validator in store.equivocators.getOrDefault(key)
+
+proc process_inclusion_list*(
+    store: var InclusionListStore,
+    inclusionList: InclusionList,
+    accept: bool
+) {.raises: [].} =
+  ## Record `inclusionList` if `accept` is true. Validators that equivocate are
+  ## remembered and future lists from them are ignored.
+  let
+    key = makeKey(inclusionList.slot, inclusionList.inclusion_list_committee_root)
+    validator = ValidatorIndex(inclusionList.validator_index)
+
+  if store.isKnownEquivocator(key, validator):
+    return
+
+  # Work on the stored sequence in place - assigning `mgetOrPutSeq` to a local
+  # `var` would update a copy and silently drop the new entry.
+  template lists: untyped = store.inclusionLists.mgetOrPutSeq(key)
+
+  for idx, existing in lists.pairs:
+    if existing.validator_index != inclusionList.validator_index:
+      continue
+
+    if existing == inclusionList:
+      return
+
+    # Equivocation detected: drop previous entry and mark validator.
+    store.markEquivocator(key, validator)
+    lists.delete(idx)
+    return
+
+  if accept:
+    lists.add(inclusionList)
+
+proc getInclusionListsForKey*(
+    store: InclusionListStore, key: InclusionListKey
+): seq[InclusionList] =
+  store.inclusionLists.getOrDefault(key)
+
+proc getEquivocatorsForKey*(
+    store: InclusionListStore, key: InclusionListKey
+): HashSet[ValidatorIndex] =
+  store.equivocators.getOrDefault(key)
+
+proc prune*(store: var InclusionListStore, keepFromSlot: Slot) {.raises: [].} =
+  ## Drop entries for slots older than `keepFromSlot`.
+  var toDelete: seq[InclusionListKey]
+  for key in store.inclusionLists.keys:
+    if key.slot < keepFromSlot:
+      toDelete.add(key)
+
+  for key in toDelete:
+    store.inclusionLists.del(key)
+    store.equivocators.del(key)
+
+## Temporary global store.
+## TODO: wire a beacon-node owned instance once the
+## gossip integration for inclusion lists lands.
+var globalInclusionListStore*: ref InclusionListStore
+
+proc ensureGlobalStore() =
+  if globalInclusionListStore.isNil:
+    globalInclusionListStore = new(InclusionListStore)
+    globalInclusionListStore[] = InclusionListStore.init()
+
+proc get_inclusion_list_store*(): var InclusionListStore =
+  ensureGlobalStore()
+  globalInclusionListStore[]
+
+proc setGlobalInclusionListStore*(store: ref InclusionListStore) =
+  globalInclusionListStore = store
+
+proc resetGlobalInclusionListStore*() =
+  if not globalInclusionListStore.isNil:
+    globalInclusionListStore[] = InclusionListStore.init()
+  else:
+    ensureGlobalStore()
diff --git a/beacon_chain/spec/focil_helpers.nim b/beacon_chain/spec/focil_helpers.nim
index 3b967489d3..3d404d59d1 100644
--- a/beacon_chain/spec/focil_helpers.nim
+++ b/beacon_chain/spec/focil_helpers.nim
@@ -9,7 +9,7 @@
 # Uncategorized helper functions from the spec

 import
-  std/[algorithm, sequtils],
+  std/[algorithm],
+  ssz_serialization/types as sszTypes,
   results,
   eth/p2p/discoveryv5/[node],
   kzg4844/[kzg],
@@ -21,8 +21,40 @@ import
   validator],
   ./datatypes/[fulu, focil]

+const
+  viewFreezeOffset = slotOffset(VIEW_FREEZE_DEADLINE)
+  submissionDueOffset = slotOffset(INCLUSION_LIST_SUBMISSION_DUE)
+  proposerCutoffOffset = slotOffset(PROPOSER_INCLUSION_LIST_CUT_OFF)
+
+func inclusion_list_view_freeze*(slot: Slot): BeaconTime =
+  slot.start_beacon_time() + viewFreezeOffset
+
+func inclusion_list_submission_due*(slot: Slot): BeaconTime =
+  slot.start_beacon_time() + submissionDueOffset
+
+func inclusion_list_proposer_cutoff*(slot: Slot): BeaconTime =
+  slot.start_beacon_time() + proposerCutoffOffset
+
+func get_view_freeze_cutoff_ms*(): uint64 =
+  uint64(viewFreezeOffset.nanoseconds div 1_000_000)
+
+func get_inclusion_list_submission_due_ms*(): uint64 =
+  uint64(submissionDueOffset.nanoseconds div 1_000_000)
+
+func get_proposer_inclusion_list_cutoff_ms*(): uint64 =
+  uint64(proposerCutoffOffset.nanoseconds div 1_000_000)
+
+proc compute_inclusion_list_committee_root*(
+    committee: HashSet[uint64]): Eth2Digest =
+  ## Compute the SSZ root of the inclusion list committee
+  var committeeList: sszTypes.List[uint64,
+    sszTypes.Limit INCLUSION_LIST_COMMITTEE_SIZE]
+  for validator in committee:
+    discard committeeList.add(validator)
+  hash_tree_root(committeeList)
+
 # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.2/specs/_features/eip7805/beacon-chain.md#new-is_valid_inclusion_list_signature
-func verify_inclusion_list_signature*(
+func is_valid_inclusion_list_signature*(
     state: ForkyBeaconState,
     signed_inclusion_list: SignedInclusionList): bool =
   ## Check if the `signed_inclusion_list` has a valid signature
@@ -34,31 +66,31 @@
       message.slot.epoch())
     signing_root = compute_signing_root(message, domain)

-  blsVerify(pubkey, signing_root.data, signature)
+  blsVerify(pubkey, signing_root.data, signed_inclusion_list.signature)

 # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.2/specs/_features/eip7805/beacon-chain.md#new-get_inclusion_list_committee
 func resolve_inclusion_list_committee*(
     state: ForkyBeaconState,
-    slot: Slot): HashSet[ValidatorIndex] =
+    slot: Slot): HashSet[uint64] =
   ## Return the inclusion list committee for the given slot
   let
     seed = get_seed(state, slot.epoch(), DOMAIN_INCLUSION_LIST_COMMITTEE)
     indices =
-      get_active_validator_indices(state, epoch)
+      get_active_validator_indices(state, slot.epoch())
     start = (slot mod SLOTS_PER_EPOCH) * INCLUSION_LIST_COMMITTEE_SIZE
     end_i = start + INCLUSION_LIST_COMMITTEE_SIZE
     seq_len {.inject.} = indices.lenu64
- var res: HashSet[ValidatorIndex] + var res: HashSet[uint64] for i in 0.. latestTolerated: + # Way past the proposer cutoff; nothing to be gained from processing. + return + + if not verify_inclusion_list_signature(state, signed_inclusion_list): + return + + let + committee = get_inclusion_list_committee(state, message.slot) + committeeRoot = compute_inclusion_list_committee_root(committee) + + if message.inclusion_list_committee_root != committeeRoot: + return + + let acceptBeforeFreeze = + wallTime <= inclusion_list_view_freeze(message.slot) + + store.process_inclusion_list(message, accept = acceptBeforeFreeze) + +proc get_inclusion_list_transactions*( + store: InclusionListStore, state: ForkyBeaconState, slot: Slot +): seq[bellatrix.Transaction] = + ## Collect the unique transactions from valid inclusion lists for ``slot`` + let + committee = get_inclusion_list_committee(state, slot) + committeeRoot = compute_inclusion_list_committee_root(committee) + key = makeKey(slot, committeeRoot) + equivocators = store.getEquivocatorsForKey(key) + + var aggregated: seq[bellatrix.Transaction] + for inclusionList in store.getInclusionListsForKey(key): + if inclusionList.validator_index in equivocators: + continue + for tx in inclusionList.transactions.items: + if tx notin aggregated: + aggregated.add(tx) + + aggregated diff --git a/beacon_chain/spec/network.nim b/beacon_chain/spec/network.nim index 3a89bfda5d..44ac6ae281 100644 --- a/beacon_chain/spec/network.nim +++ b/beacon_chain/spec/network.nim @@ -16,12 +16,14 @@ export base const # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/phase0/p2p-interface.md#topics-and-messages # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/capella/p2p-interface.md#topics-and-messages + # https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7805/p2p-interface.md#topics-and-messages topicBeaconBlocksSuffix = "beacon_block/ssz_snappy" topicVoluntaryExitsSuffix = "voluntary_exit/ssz_snappy" topicProposerSlashingsSuffix = "proposer_slashing/ssz_snappy" topicAttesterSlashingsSuffix = "attester_slashing/ssz_snappy" topicAggregateAndProofsSuffix = "beacon_aggregate_and_proof/ssz_snappy" topicBlsToExecutionChangeSuffix = "bls_to_execution_change/ssz_snappy" + topicInclusionListSuffix = "inclusion_list/ssz_snappy" const # The spec now includes this as a bare uint64 as `RESP_TIMEOUT` @@ -68,6 +70,10 @@ func getAggregateAndProofsTopic*(forkDigest: ForkDigest): string = func getBlsToExecutionChangeTopic*(forkDigest: ForkDigest): string = eth2Prefix(forkDigest) & topicBlsToExecutionChangeSuffix +# https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7805/p2p-interface.md#topics-and-messages +func getInclusionListTopic*(forkDigest: ForkDigest): string = + eth2Prefix(forkDigest) & topicInclusionListSuffix + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.2/specs/phase0/validator.md#broadcast-attestation func compute_subnet_for_attestation*( committees_per_slot: uint64, slot: Slot, committee_index: CommitteeIndex): diff --git a/beacon_chain/spec/signatures.nim b/beacon_chain/spec/signatures.nim index 726c1b42ee..2c3c40eef2 100644 --- a/beacon_chain/spec/signatures.nim +++ b/beacon_chain/spec/signatures.nim @@ -17,7 +17,7 @@ ## functions. 
import - ./datatypes/[phase0, altair, bellatrix], ./helpers, ./eth2_merkleization + ./datatypes/[phase0, altair, bellatrix, focil], ./helpers, ./eth2_merkleization from ./datatypes/capella import BLSToExecutionChange, SignedBLSToExecutionChange @@ -424,3 +424,27 @@ proc verify_bls_to_execution_change_signature*( let signing_root = compute_bls_to_execution_change_signing_root( genesisFork, genesis_validators_root, msg.message) blsVerify(pubkey, signing_root.data, signature) + +# https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7805/validator.md#constructing-a-signed-inclusion-list +func compute_inclusion_list_signing_root*( + fork: Fork, genesis_validators_root: Eth2Digest, + message: InclusionList): Eth2Digest = + let domain = get_domain( + fork, DOMAIN_INCLUSION_LIST_COMMITTEE, message.slot.epoch(), + genesis_validators_root) + compute_signing_root(message, domain) + +func get_inclusion_list_signature*( + fork: Fork, genesis_validators_root: Eth2Digest, + message: InclusionList, privkey: ValidatorPrivKey): CookedSig = + let signing_root = compute_inclusion_list_signing_root( + fork, genesis_validators_root, message) + blsSign(privkey, signing_root.data) + +proc verify_inclusion_list_signature*( + fork: Fork, genesis_validators_root: Eth2Digest, + message: InclusionList, + pubkey: ValidatorPubKey | CookedPubKey, signature: SomeSig): bool = + let signing_root = compute_inclusion_list_signing_root( + fork, genesis_validators_root, message) + blsVerify(pubkey, signing_root.data, signature) \ No newline at end of file diff --git a/beacon_chain/spec/signatures_batch.nim b/beacon_chain/spec/signatures_batch.nim index f62b9c0180..9337b33997 100644 --- a/beacon_chain/spec/signatures_batch.nim +++ b/beacon_chain/spec/signatures_batch.nim @@ -21,7 +21,7 @@ import bearssl/rand, # Internal "."/[helpers, beaconstate, forks, signatures], - "."/datatypes/[altair, bellatrix, phase0] + "."/datatypes/[altair, bellatrix, phase0, focil] export results, rand, altair, phase0, taskpools, signatures @@ -227,6 +227,16 @@ func bls_to_execution_change_signature_set*( SignatureSet.init(pubkey, signing_root, signature) +# https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7805/validator.md#constructing-a-signed-inclusion-list +func inclusion_list_signature_set*( + fork: Fork, genesis_validators_root: Eth2Digest, + message: InclusionList, + pubkey: CookedPubKey, signature: CookedSig): SignatureSet = + let signing_root = compute_inclusion_list_signing_root( + fork, genesis_validators_root, message) + + SignatureSet.init(pubkey, signing_root, signature) + proc collectProposerSignatureSet*( sigs: var seq[SignatureSet], blocks: openArray[ForkedSignedBeaconBlock], diff --git a/beacon_chain/spec/state_transition_block.nim b/beacon_chain/spec/state_transition_block.nim index 5299179c4e..35a7734b41 100644 --- a/beacon_chain/spec/state_transition_block.nim +++ b/beacon_chain/spec/state_transition_block.nim @@ -1121,6 +1121,71 @@ proc process_execution_payload*( ok() +# Focil based changes On Fulu +# https://github.com/ethereum/consensus-specs/blob/26bb70739ac7ab1ec1f6a9b47ca8e47db0c14848/specs/_features/eip7805/beacon-chain.md#modified-verify_and_notify_new_payload +proc process_execution_payload*( + cfg: RuntimeConfig, state: var fulu.BeaconState, + body: SomeFuluBeaconBlockBody, + notify_new_payload: fulu.ExecutePayload): Result[void, cstring] = + template payload: auto = body.execution_payload + + # Verify consistency of the parent hash with respect to the previous + # execution 
payload header + if not (payload.parent_hash == + state.latest_execution_payload_header.block_hash): + return err("process_execution_payload: payload and state parent hash mismatch") + + # Verify prev_randao + if not (payload.prev_randao == get_randao_mix(state, get_current_epoch(state))): + return err("process_execution_payload: payload and state randomness mismatch") + + # Verify timestamp + if not (payload.timestamp == compute_timestamp_at_slot(state, state.slot)): + return err("process_execution_payload: invalid timestamp") + + # [New in Deneb] Verify commitments are under limit + if not (lenu64(body.blob_kzg_commitments) <= cfg.MAX_BLOBS_PER_BLOCK_ELECTRA): + return err("process_execution_payload: too many KZG commitments") + + # Verify inclusion list transactions + block: + let inclusionStore = get_inclusion_list_store() + let requiredTxs = get_inclusion_list_transactions(inclusionStore, state, state.slot) + if requiredTxs.len > 0: + let payloadTxs = payload.transactions.asSeq + for tx in requiredTxs: + if tx notin payloadTxs: + # NOTE: This currently assumes the gossip-fed inclusion list store is + # authoritative for the slot. Once we wire in precise timing + # validation for view-freeze deadlines, this guard should be tightened. + return err("process_execution_payload: missing inclusion list tx") + + # Verify the execution payload is valid + if not notify_new_payload(payload): + return err("process_execution_payload: execution payload invalid") + + # Cache execution payload header + state.latest_execution_payload_header = fulu.ExecutionPayloadHeader( + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + extra_data: payload.extra_data, + transactions_root: hash_tree_root(payload.transactions), + withdrawals_root: hash_tree_root(payload.withdrawals), + blob_gas_used: payload.blob_gas_used, + excess_blob_gas: payload.excess_blob_gas) + + ok() + # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#new-process_withdrawals # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#updated-process_withdrawals func process_withdrawals*( @@ -1414,4 +1479,4 @@ proc process_block*( operations_rewards.sync_aggregate = ? process_sync_aggregate( state, blck.body.sync_aggregate, total_active_balance, flags, cache) - ok(operations_rewards) \ No newline at end of file + ok(operations_rewards) diff --git a/beacon_chain/validators/beacon_validators.nim b/beacon_chain/validators/beacon_validators.nim index 963bf75b4d..69e1a34ed1 100644 --- a/beacon_chain/validators/beacon_validators.nim +++ b/beacon_chain/validators/beacon_validators.nim @@ -1970,6 +1970,91 @@ proc handleFallbackAttestations(node: BeaconNode, lastSlot, slot: Slot) = sendAttestations(node, attestationHead.blck, slot) +proc produceInclusionLists( + node: BeaconNode, head: BlockRef, slot: Slot +): Future[void] {.async: (raises: [CancelledError]).} = + ## Construct inclusion lists for local validators assigned to the slot. 
+ if node.dag.cfg.consensusForkAtEpoch(slot.epoch) < ConsensusFork.Fulu: + return + + let parentExecutionHash = node.dag.loadExecutionBlockHash(head).valueOr: + debug "Skipping inclusion list production; missing execution parent", + head = shortLog(head) + return + + if parentExecutionHash.isZero: + return + + var cache = StateCache() + let proposalStateRes = node.dag.getProposalState(head, slot, cache) + if proposalStateRes.isErr: + warn "Unable to compute proposal state for inclusion list", + head = shortLog(head), slot, err = proposalStateRes.error() + return + let proposalState = proposalStateRes.get + + let committee = block: + withState(proposalState[]): + resolve_inclusion_list_committee(forkyState.data, slot) + + if committee.len == 0: + return + + let inclusionTxs = await node.elManager.getInclusionList(parentExecutionHash) + + let limitedTxs = + if inclusionTxs.len > int(MAX_TRANSACTIONS_PER_PAYLOAD): + warn "Execution client returned excess inclusion list transactions; truncating", + returned = inclusionTxs.len, + limit = int(MAX_TRANSACTIONS_PER_PAYLOAD) + inclusionTxs[0 ..< int(MAX_TRANSACTIONS_PER_PAYLOAD)] + else: + inclusionTxs + + let committeeRoot = compute_inclusion_list_committee_root(committee) + + let transactions = + List[bellatrix.Transaction, MAX_TRANSACTIONS_PER_PAYLOAD].init(limitedTxs) + let fork = node.dag.forkAtEpoch(slot.epoch) + let genesisRoot = node.dag.genesis_validators_root + let wallTime = node.beaconClock.now() + + var store = get_inclusion_list_store() + + for validator in committee: + let validatorIndex = ValidatorIndex(validator) + let attached = node.getValidatorForDuties(validatorIndex, slot).valueOr: + continue + + var message = InclusionList( + slot: slot, + validator_index: validator, + inclusion_list_committee_root: committeeRoot, + transactions: transactions) + var signed = SignedInclusionList(message: message) + + let sigRes = await attached.getInclusionListSignature( + fork, genesisRoot, signed.message) + if sigRes.isErr: + let errMsg = sigRes.error() + if errMsg.contains("Remote signer does not support inclusion list signing"): + debug "Skipping inclusion list due to unsupported remote signer", + validator_index = validatorIndex, + slot + else: + warn "Unable to sign inclusion list", + validator_index = validatorIndex, + slot, error = errMsg + continue + + signed.signature = sigRes.get() + + withState(proposalState[]): + store.process_inclusion_list(forkyState.data, signed, wallTime) + + store.prune(node.dag.finalizedHead.slot) + await node.elManager.updatePayloadInclusionList(limitedTxs) + proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async: (raises: [CancelledError]).} = ## Perform validator duties - create blocks, vote and aggregate existing votes if node.attachedValidators[].count == 0: @@ -2006,6 +2091,8 @@ proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async: (ra withState(node.dag.headState): node.updateValidators(forkyState.data.validators.asSeq()) + await produceInclusionLists(node, head, slot) + let newHead = await handleProposal(node, head, slot) head = newHead @@ -2174,4 +2261,4 @@ proc makeMaybeBlindedBeaconBlockForHeadAndSlot*( makeMaybeBlindedBeaconBlockForHeadAndSlotImpl[ResultType]( node, consensusFork, randao_reveal, graffiti, head, slot, - builderBoostFactor) \ No newline at end of file + builderBoostFactor) diff --git a/beacon_chain/validators/validator_pool.nim b/beacon_chain/validators/validator_pool.nim index 246e2819bb..5f07517dfa 100644 --- 
a/beacon_chain/validators/validator_pool.nim +++ b/beacon_chain/validators/validator_pool.nim @@ -976,6 +976,20 @@ proc getSlotSignature*(v: AttachedValidator, fork: Fork, v.slotSignature = Opt.some((slot, signature.get)) return signature +proc getInclusionListSignature*(v: AttachedValidator, fork: Fork, + genesis_validators_root: Eth2Digest, + inclusion_list: InclusionList + ): Future[SignatureResult] + {.async: (raises: [CancelledError]).} = + case v.kind + of ValidatorKind.Local: + let sig = get_inclusion_list_signature( + fork, genesis_validators_root, inclusion_list, v.data.privateKey) + SignatureResult.ok(sig.toValidatorSig()) + of ValidatorKind.Remote: + # TODO: Implement inclusion list signing for remote signers + SignatureResult.err("Remote signer does not support inclusion list signing") + proc getValidatorExitSignature*(v: AttachedValidator, fork: Fork, genesis_validators_root: Eth2Digest, voluntary_exit: VoluntaryExit diff --git a/vendor/nim-web3 b/vendor/nim-web3 index d8a91d0409..7d85d5b1db 160000 --- a/vendor/nim-web3 +++ b/vendor/nim-web3 @@ -1 +1 @@ -Subproject commit d8a91d040975cd3dd2a10c26456fab2d7523e8dd +Subproject commit 7d85d5b1db21e0c612c5dfe2dd608dd254da82f4
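
Usage sketch (illustrative only, not part of the patch): how the InclusionListStore API introduced in beacon_chain/spec/datatypes/focil.nim is expected to fit together. The proc name exampleStoreUsage, the arguments exampleList and wallSlot, and the import path are placeholders/assumptions for the purpose of this sketch.

  import ./beacon_chain/spec/datatypes/focil  # assumed path; adjust to the project setup

  proc exampleStoreUsage(exampleList: InclusionList, wallSlot: Slot) =
    var store = InclusionListStore.init()
    # Record the message; equivocating validators are remembered and their
    # later submissions are ignored.
    store.process_inclusion_list(exampleList, accept = true)
    let key = makeKey(exampleList.slot, exampleList.inclusion_list_committee_root)
    discard store.getInclusionListsForKey(key)   # lists accepted for this key
    discard store.getEquivocatorsForKey(key)     # validators seen equivocating
    # Drop entries older than the current wall slot.
    store.prune(wallSlot)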